Examples of FlushInfo
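
Each snippet below wraps a FlushInfo in an IOContext so the Directory knows the expected document count and byte size of the segment about to be written. A minimal, self-contained sketch of that pattern follows; the class name and the two estimates are made-up values for illustration, not taken from any snippet:

    import org.apache.lucene.store.FlushInfo;
    import org.apache.lucene.store.IOContext;

    public class FlushInfoSketch {
      public static void main(String[] args) {
        // Hypothetical estimates; the real callers below derive them from
        // the in-RAM buffer (numDocsInRAM, bytesUsed()) or a flushed segment.
        int numDocs = 1000;
        long estimatedBytes = 4L * 1024 * 1024;

        IOContext context = new IOContext(new FlushInfo(numDocs, estimatedBytes));

        // The receiving Directory can read the estimates back off the context:
        System.out.println(context.flushInfo.numDocs + " docs, ~"
            + context.flushInfo.estimatedSegmentSize + " bytes");
      }
    }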


Examples of org.apache.lucene.store.FlushInfo

    // Estimate what we use in RAM: ~8 bytes per posting plus the payload bytes:
    long bytes = totalPostings * 8 + totalPayloadBytes;

    // Args: null infoStream, 32 = term index interval, null = no buffered
    // deletes; the FlushInfo carries the expected doc count and size:
    SegmentWriteState writeState = new SegmentWriteState(null, dir,
                                                         segmentInfo, newFieldInfos,
                                                         32, null, new IOContext(new FlushInfo(maxDoc, bytes)));
    FieldsConsumer fieldsConsumer = codec.postingsFormat().fieldsConsumer(writeState);

    for(Map.Entry<String,Map<BytesRef,Long>> fieldEnt : fields.entrySet()) {
      String field = fieldEnt.getKey();
      Map<BytesRef,Long> terms = fieldEnt.getValue();
View Full Code Here

Examples of org.apache.lucene.store.FlushInfo

    assert numDocsInRAM > 0;
    assert deleteSlice.isEmpty() : "all deletes must be applied in prepareFlush";
    segmentInfo.setDocCount(numDocsInRAM);
    final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segmentInfo, fieldInfos.finish(),
        indexWriterConfig.getTermIndexInterval(),
        pendingUpdates, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
    final double startMBUsed = bytesUsed() / 1024. / 1024.;

    // Apply delete-by-docID now (delete-by-docID only
    // happens when an exception is hit while processing a
    // doc, e.g. if the analyzer has some problem with the text):
View Full Code Here

Examples of org.apache.lucene.store.FlushInfo

    SegmentCommitInfo newSegment = flushedSegment.segmentInfo;

    IndexWriter.setDiagnostics(newSegment.info, IndexWriter.SOURCE_FLUSH);
   
    // Describe the flushed segment (doc count and on-disk size) to the Directory:
    IOContext context = new IOContext(new FlushInfo(newSegment.info.getDocCount(), newSegment.sizeInBytes()));

    boolean success = false;
    try {
     
      if (indexWriterConfig.getUseCompoundFile()) {
View Full Code Here

Examples of org.apache.lucene.store.FlushInfo

    assert numDocsInRAM > 0;
    assert deleteSlice == null : "all deletes must be applied in prepareFlush";
    segmentInfo.setDocCount(numDocsInRAM);
    flushState = new SegmentWriteState(infoStream, directory, segmentInfo, fieldInfos.finish(),
        writer.getConfig().getTermIndexInterval(),
        pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
    final double startMBUsed = parent.flushControl.netBytes() / 1024. / 1024.;

    // Apply delete-by-docID now (delete-by-docID only
    // happens when an exception is hit while processing a
    // doc, e.g. if the analyzer has some problem with the text):
View Full Code Here

Examples of org.apache.lucene.store.FlushInfo

    SegmentInfoPerCommit newSegment = flushedSegment.segmentInfo;

    IndexWriter.setDiagnostics(newSegment.info, "flush");
   
    IOContext context = new IOContext(new FlushInfo(newSegment.info.getDocCount(), newSegment.sizeInBytes()));

    boolean success = false;
    try {
      if (writer.useCompoundFile(newSegment)) {
View Full Code Here

Examples of org.apache.lucene.store.FlushInfo

    final int randomNumDocs = random.nextInt(4192);
    final int size = random.nextInt(512) * randomNumDocs;
    if (oldContext.flushInfo != null) {
      // Always return at least the estimatedSegmentSize of
      // the incoming IOContext:
      return new IOContext(new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size)));
    } else if (oldContext.mergeInfo != null) {
      // Always return at least the estimatedMergeBytes of
      // the incoming IOContext:
      return new IOContext(new MergeInfo(randomNumDocs,
          Math.max(oldContext.mergeInfo.estimatedMergeBytes, size),
          random.nextBoolean(), TestUtil.nextInt(random, 1, 100)));
    } else {
      // Make a totally random IOContext:
      final IOContext context;
      switch (random.nextInt(5)) {
      case 0:
        context = IOContext.DEFAULT;
        break;
      case 1:
        context = IOContext.READ;
        break;
      case 2:
        context = IOContext.READONCE;
        break;
      case 3:
        context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
        break;
      case 4:
        context = new IOContext(new FlushInfo(randomNumDocs, size));
        break;
      default:
        context = IOContext.DEFAULT;
      }
      return context;
View Full Code Here
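
The randomized test above distinguishes flushes from merges by checking which of IOContext's fields is non-null. The same check works inside a Directory wrapper; here is a minimal sketch, assuming Lucene's FilterDirectory base class (the wrapper name and log messages are illustrative, not from the snippet):

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FilterDirectory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;

    // Hypothetical wrapper that logs what kind of write is starting.
    public class FlushAwareDirectory extends FilterDirectory {
      public FlushAwareDirectory(Directory in) {
        super(in);
      }

      @Override
      public IndexOutput createOutput(String name, IOContext context) throws IOException {
        if (context.flushInfo != null) {
          // Flush: numDocs and estimatedSegmentSize describe the new segment.
          System.out.println("flush: " + context.flushInfo.numDocs + " docs, ~"
              + context.flushInfo.estimatedSegmentSize + " bytes -> " + name);
        } else if (context.mergeInfo != null) {
          // Merge: estimatedMergeBytes is the expected total size of the merge.
          System.out.println("merge: ~" + context.mergeInfo.estimatedMergeBytes
              + " bytes -> " + name);
        }
        return super.createOutput(name, context);
      }
    }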

Examples of org.apache.lucene.store.FlushInfo

    }
  }

  private void initTermVectorsWriter() throws IOException {
    if (writer == null) {
      // Size the context by what is currently buffered in RAM:
      IOContext context = new IOContext(new FlushInfo(docWriter.getNumDocsInRAM(), docWriter.bytesUsed()));
      writer = docWriter.codec.termVectorsFormat().vectorsWriter(docWriter.directory, docWriter.getSegmentInfo(), context);
      lastDocID = 0;
    }
  }
View Full Code Here
