Package org.apache.lucene.store

Examples of org.apache.lucene.store.TrackingDirectoryWrapper
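
TrackingDirectoryWrapper is a Directory wrapper that records the name of every file created through it. Callers read that set back via getCreatedFiles(), either to register a freshly written segment's files on its SegmentInfo or to delete partially written files when an operation fails. Each excerpt below is truncated to the lines around the TrackingDirectoryWrapper usage. First, a minimal self-contained sketch of the idiom (assuming the Lucene 4.x API; RAMDirectory and the file name are illustrative, not taken from the excerpts):

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public class TrackingDirectoryWrapperSketch {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory(); // illustrative; any Directory works
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);

    boolean success = false;
    try {
      // Every file created through the wrapper is recorded by name.
      IndexOutput out = trackingDir.createOutput("_demo.dat", IOContext.DEFAULT);
      try {
        out.writeInt(42);
      } finally {
        out.close();
      }
      success = true;
    } finally {
      if (!success) {
        // On failure, delete whatever was partially created: the same
        // cleanup idiom used by the excerpts below.
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // ignore, so only the first exception propagates
          }
        }
      }
    }
    System.out.println("created files: " + trackingDir.getCreatedFiles());
  }
}

The first excerpt appears to be test code that merges two SegmentReaders into a new segment; the tracking wrapper collects the files SegmentMerger creates so they can be recorded on the merged SegmentInfo: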


      IOContext context = newIOContext(random());
      SegmentReader r1 = new SegmentReader(si1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
      SegmentReader r2 = new SegmentReader(si2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);

      final Codec codec = Codec.getDefault();
      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
      final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, -1, false, codec, null);

      SegmentMerger merger = new SegmentMerger(Arrays.<AtomicReader>asList(r1, r2),
          si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
          MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), context);

      MergeState mergeState = merger.merge();
      r1.close();
      r2.close();
      final SegmentInfo info = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged,
                                               si1.info.getDocCount() + si2.info.getDocCount(),
                                               false, codec, null);
      info.setFiles(new HashSet<String>(trackingDir.getCreatedFiles()));
     
      if (useCompoundFile) {
        Collection<String> filesToDelete = IndexWriter.createCompoundFile(InfoStream.getDefault(), dir, MergeState.CheckAbort.NONE, info, newIOContext(random()));
        info.setUseCompoundFile(true);
        for (final String fileToDelete : filesToDelete) {
          // the original files are now packed into the CFS and can be removed
          si1.info.dir.deleteFile(fileToDelete);
        }
      }
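
The next excerpt, apparently from ReadersAndUpdates.writeLiveDocs, writes a segment's live-docs file through a tracking wrapper so that partially written files can be deleted if the codec throws (e.g. on a full disk):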


    assert liveDocs.length() == info.info.getDocCount();
   
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
   
    // We can write directly to the actual name (vs to a
    // .tmp & renaming it) because the file is not live
    // until segments file is written:
    boolean success = false;
    try {
      Codec codec = info.info.getCodec();
      codec.liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
      success = true;
    } finally {
      if (!success) {
        // Advance only the nextWriteDelGen so that a 2nd
        // attempt to write will write to a new file
        info.advanceNextWriteDelGen();
       
        // Delete any partially created file(s):
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // Ignore so we throw only the first exc
          }
        }
      }
    }
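
The longest excerpt looks like ReadersAndUpdates.writeFieldUpdates: numeric doc-values updates are written to a new generation of files through a tracking wrapper, the same rollback idiom runs on failure, and getCreatedFiles() finally supplies the file set recorded for the new field-infos generation: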

    assert numericFieldUpdates != null && !numericFieldUpdates.isEmpty();
   
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
   
    FieldInfos fieldInfos = null;
    boolean success = false;
    try {
      final Codec codec = info.info.getCodec();

      // reader could be null e.g. for a just merged segment (from
      // IndexWriter.commitMergedDeletes).
      final SegmentReader reader = this.reader == null ? new SegmentReader(info, writer.getConfig().getReaderTermsIndexDivisor(), IOContext.READONCE) : this.reader;
      try {
        // clone FieldInfos so that we can update their dvGen separately from
        // the reader's infos and write them to a new fieldInfos_gen file
        FieldInfos.Builder builder = new FieldInfos.Builder(writer.globalFieldNumberMap);
        // cannot use builder.add(reader.getFieldInfos()) because it does not
        // clone FI.attributes as well as FI.dvGen
        for (FieldInfo fi : reader.getFieldInfos()) {
          FieldInfo clone = builder.add(fi);
          // copy the stuff FieldInfos.Builder doesn't copy
          if (fi.attributes() != null) {
            for (Entry<String,String> e : fi.attributes().entrySet()) {
              clone.putAttribute(e.getKey(), e.getValue());
            }
          }
          clone.setDocValuesGen(fi.getDocValuesGen());
        }
        // create new fields or update existing ones to have NumericDV type
        for (String f : numericFieldUpdates.keySet()) {
          builder.addOrUpdate(f, NumericDocValuesField.TYPE);
        }
       
        fieldInfos = builder.finish();
        final long nextFieldInfosGen = info.getNextFieldInfosGen();
        final String segmentSuffix = Long.toString(nextFieldInfosGen, Character.MAX_RADIX);
        final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, writer.getConfig().getTermIndexInterval(), null, IOContext.DEFAULT, segmentSuffix);
        final DocValuesFormat docValuesFormat = codec.docValuesFormat();
        final DocValuesConsumer fieldsConsumer = docValuesFormat.fieldsConsumer(state);
        boolean fieldsConsumerSuccess = false;
        try {
//          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeLiveDocs: applying updates; seg=" + info + " updates=" + numericUpdates);
          for (Entry<String,NumericFieldUpdates> e : numericFieldUpdates.entrySet()) {
            final String field = e.getKey();
            final NumericFieldUpdates fieldUpdates = e.getValue();
            final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
            assert fieldInfo != null;

            fieldInfo.setDocValuesGen(nextFieldInfosGen);
            // write the numeric updates to a new gen'd docvalues file
            fieldsConsumer.addNumericField(fieldInfo, new Iterable<Number>() {
              final NumericDocValues currentValues = reader.getNumericDocValues(field);
              final Bits docsWithField = reader.getDocsWithField(field);
              final int maxDoc = reader.maxDoc();
              final UpdatesIterator updatesIter = fieldUpdates.getUpdates();
              @Override
              public Iterator<Number> iterator() {
                updatesIter.reset();
                return new Iterator<Number>() {

                  int curDoc = -1;
                  int updateDoc = updatesIter.nextDoc();
                 
                  @Override
                  public boolean hasNext() {
                    return curDoc < maxDoc - 1;
                  }

                  @Override
                  public Number next() {
                    if (++curDoc >= maxDoc) {
                      throw new NoSuchElementException("no more documents to return values for");
                    }
                    if (curDoc == updateDoc) { // this document has an updated value
                      Long value = updatesIter.value(); // either null (unset value) or updated value
                      updateDoc = updatesIter.nextDoc(); // prepare for next round
                      return value;
                    } else {
                      // no update for this document
                      assert curDoc < updateDoc;
                      if (currentValues != null && docsWithField.get(curDoc)) {
                        // only read the current value if the document had a value before
                        return currentValues.get(curDoc);
                      } else {
                        return null;
                      }
                    }
                  }

                  @Override
                  public void remove() {
                    throw new UnsupportedOperationException("this iterator does not support removing elements");
                  }
                };
              }
            });
          }
         
          codec.fieldInfosFormat().getFieldInfosWriter().write(trackingDir, info.info.name, segmentSuffix, fieldInfos, IOContext.DEFAULT);
          fieldsConsumerSuccess = true;
        } finally {
          if (fieldsConsumerSuccess) {
            fieldsConsumer.close();
          } else {
            IOUtils.closeWhileHandlingException(fieldsConsumer);
          }
        }
      } finally {
        if (reader != this.reader) {
//          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeLiveDocs: closeReader " + reader);
          reader.close();
        }
      }
   
      success = true;
    } finally {
      if (!success) {
        // Advance only the nextWriteFieldInfosGen so that a 2nd
        // attempt to write will write to a new file
        info.advanceNextWriteFieldInfosGen();
       
        // Delete any partially created file(s):
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // Ignore so we throw only the first exc
          }
        }
      }
    }
   
    info.advanceFieldInfosGen();
    // copy all the updates to mergingUpdates, so they can later be applied to the merged segment
    if (isMerging) {
      for (Entry<String,NumericFieldUpdates> e : numericFieldUpdates.entrySet()) {
        NumericFieldUpdates fieldUpdates = mergingNumericUpdates.get(e.getKey());
        if (fieldUpdates == null) {
          mergingNumericUpdates.put(e.getKey(), e.getValue());
        } else {
          fieldUpdates.merge(e.getValue());
        }
      }
    }
   
    // create a new map, keeping only the gens that are in use
    Map<Long,Set<String>> genUpdatesFiles = info.getUpdatesFiles();
    Map<Long,Set<String>> newGenUpdatesFiles = new HashMap<Long,Set<String>>();
    final long fieldInfosGen = info.getFieldInfosGen();
    for (FieldInfo fi : fieldInfos) {
      long dvGen = fi.getDocValuesGen();
      if (dvGen != -1 && !newGenUpdatesFiles.containsKey(dvGen)) {
        if (dvGen == fieldInfosGen) {
          newGenUpdatesFiles.put(fieldInfosGen, trackingDir.getCreatedFiles());
        } else {
          newGenUpdatesFiles.put(dvGen, genUpdatesFiles.get(dvGen));
        }
      }
    }
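
DocumentsWriterPerThread wraps its Directory once, in the constructor, so every file created while flushing a new segment is tracked: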

 
  public DocumentsWriterPerThread(String segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
      FieldInfos.Builder fieldInfos) {
    this.directoryOrig = directory;
    this.directory = new TrackingDirectoryWrapper(directory);
    this.fieldInfos = fieldInfos;
    this.indexWriterConfig = indexWriterConfig;
    this.infoStream = infoStream;
    this.codec = indexWriterConfig.getCodec();
    this.docState = new DocState(this, infoStream);
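
The next excerpt appears to be from IndexWriter.addIndexes(IndexReader...). Note the two-phase bookkeeping: after the merge, setFiles() records the tracked files and the set is cleared, so that addFiles() at the end picks up only the .si file the codec has just written: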

     
      final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));

      // TODO: somehow we should fix this merge so it's
      // abortable so that IW.close(false) is able to stop it
      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);

      SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1,
                                         false, codec, null);

      SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir, config.getTermIndexInterval(),
                                               MergeState.CheckAbort.NONE, globalFieldNumberMap, context);
     
      if (!merger.shouldMerge()) {
        return;
      }

      MergeState mergeState;
      boolean success = false;
      try {
        mergeState = merger.merge();                // merge 'em
        success = true;
      } finally {
        if (!success) {
          synchronized(this) {
            deleter.refresh(info.name);
          }
        }
      }

      SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, -1L, -1L);

      info.setFiles(new HashSet<String>(trackingDir.getCreatedFiles()));
      trackingDir.getCreatedFiles().clear();
                                        
      setDiagnostics(info, SOURCE_ADDINDEXES_READERS);

      boolean useCompoundFile;
      synchronized(this) { // Guard segmentInfos
        if (stopMerges) {
          deleter.deleteNewFiles(infoPerCommit.files());
          return;
        }
        ensureOpen();
        useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit);
      }

      // Now create the compound file if needed
      if (useCompoundFile) {
        Collection<String> filesToDelete = infoPerCommit.files();
        try {
          createCompoundFile(infoStream, directory, MergeState.CheckAbort.NONE, info, context);
        } finally {
          // delete new non cfs files directly: they were never
          // registered with IFD
          synchronized(this) {
            deleter.deleteNewFiles(filesToDelete);
          }
        }
        info.setUseCompoundFile(true);
      }

      // Have codec write SegmentInfo.  Must do this after
      // creating CFS so that 1) .si isn't slurped into CFS,
      // and 2) .si reflects useCompoundFile=true change
      // above:
      success = false;
      try {
        codec.segmentInfoFormat().getSegmentInfoWriter().write(trackingDir, info, mergeState.fieldInfos, context);
        success = true;
      } finally {
        if (!success) {
          synchronized(this) {
            deleter.refresh(info.name);
          }
        }
      }

      info.addFiles(trackingDir.getCreatedFiles());

      // Register the new segment
      synchronized(this) {
        if (stopMerges) {
          deleter.deleteNewFiles(info.files());
          return;
        }
        ensureOpen();
        segmentInfos.add(infoPerCommit);
        checkpoint();
      }
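
When copying a segment as-is during addIndexes(Directory...), a tracking wrapper captures the files the codec's SegmentInfoWriter creates, so that (in the full method) the copy loop can skip files that were already rewritten: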

    newInfo.setFiles(segFiles);

    // We must rewrite the SI file because it references
    // segment name (its own name, if it's 3.x, and doc
    // store segment name):
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
    final Codec currentCodec = newInfo.getCodec();
    try {
      currentCodec.segmentInfoFormat().getSegmentInfoWriter().write(trackingDir, newInfo, fis, context);
    } catch (UnsupportedOperationException uoe) {
      if (currentCodec instanceof Lucene3xCodec) {
        // OK: 3x codec cannot write a new SI file;
        // SegmentInfos will write this on commit
      } else {
        throw uoe;
      }
    }

    final Collection<String> siFiles = trackingDir.getCreatedFiles();

    boolean success = false;
    try {

      // Copy the segment's files
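
Finally, an excerpt that appears to be IndexWriter.mergeMiddle: the merge writes through a TrackingDirectoryWrapper, and getCreatedFiles() becomes the merged segment's authoritative file list: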

    List<SegmentCommitInfo> sourceSegments = merge.segments;
   
    IOContext context = new IOContext(merge.getMergeInfo());

    final MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
    final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);

    if (infoStream.isEnabled("IW")) {
      infoStream.message("IW", "merging " + segString(merge.segments));
    }

    merge.readers = new ArrayList<SegmentReader>();

    // This is try/finally to make sure merger's readers are
    // closed:
    boolean success = false;
    try {
      int segUpto = 0;
      while(segUpto < sourceSegments.size()) {

        final SegmentCommitInfo info = sourceSegments.get(segUpto);

        // Hold onto the "live" reader; we will use this to
        // commit merged deletes
        final ReadersAndUpdates rld = readerPool.get(info, true);

        // Carefully pull the most recent live docs and reader
        SegmentReader reader;
        final Bits liveDocs;
        final int delCount;

        synchronized (this) {
          // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
          // pendingDeleteCount and field updates while we pull a copy:
          reader = rld.getReaderForMerge(context);
          liveDocs = rld.getReadOnlyLiveDocs();
          delCount = rld.getPendingDeleteCount() + info.getDelCount();

          assert reader != null;
          assert rld.verifyDocCounts();

          if (infoStream.isEnabled("IW")) {
            if (rld.getPendingDeleteCount() != 0) {
              infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
            } else if (info.getDelCount() != 0) {
              infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
            } else {
              infoStream.message("IW", "seg=" + segString(info) + " no deletes");
            }
          }
        }

        // Deletes might have happened after we pulled the merge reader and
        // before we got a read-only copy of the segment's actual live docs
        // (taking pending deletes into account). In that case we need to
        // make a new reader with updated live docs and del count.
        if (reader.numDeletedDocs() != delCount) {
          // fix the reader's live docs and del count
          assert delCount > reader.numDeletedDocs(); // beware of zombies

          SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.info.getDocCount() - delCount);
          boolean released = false;
          try {
            rld.release(reader);
            released = true;
          } finally {
            if (!released) {
              newReader.decRef();
            }
          }

          reader = newReader;
        }

        merge.readers.add(reader);
        assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
        segUpto++;
      }

//      System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());
     
      // we pass merge.getMergeReaders() instead of merge.readers to allow the
      // OneMerge to return a view over the actual segments to merge
      final SegmentMerger merger = new SegmentMerger(merge.getMergeReaders(),
          merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(),
          checkAbort, globalFieldNumberMap, context);

      merge.checkAborted(directory);

      // This is where all the work happens:
      MergeState mergeState;
      boolean success3 = false;
      try {
        if (!merger.shouldMerge()) {
          // would result in a 0 document segment: nothing to merge!
          mergeState = new MergeState(new ArrayList<AtomicReader>(), merge.info.info, infoStream, checkAbort);
        } else {
          mergeState = merger.merge();
        }
        success3 = true;
      } finally {
        if (!success3) {
          synchronized(this) { 
            deleter.refresh(merge.info.info.name);
          }
        }
      }
      assert mergeState.segmentInfo == merge.info.info;
      merge.info.info.setFiles(new HashSet<String>(dirWrapper.getCreatedFiles()));

      // Record which codec was used to write the segment

      if (infoStream.isEnabled("IW")) {
        if (merge.info.info.getDocCount() == 0) {
          infoStream.message("IW", "merge away fully deleted segments");
        }
      }
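
Across all of these call sites the idiom is the same: wrap the real Directory, write through the wrapper, then either register getCreatedFiles() on the SegmentInfo or delete those files on failure. A hypothetical helper (not part of Lucene) that distills the pattern, under the same Lucene 4.x API assumptions:

// Hypothetical helper, not part of Lucene: run a write action against a
// tracking wrapper and roll back whatever it created if the action fails.
import java.io.IOException;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public final class TrackedWrites {

  /** Hypothetical callback that performs writes against the wrapped directory. */
  public interface WriteAction {
    void write(Directory dir) throws IOException;
  }

  /**
   * Runs the action against a TrackingDirectoryWrapper around dir and returns
   * the names of the files it created; on failure, those files are deleted.
   */
  public static Set<String> writeOrRollback(Directory dir, WriteAction action) throws IOException {
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    boolean success = false;
    try {
      action.write(trackingDir);
      success = true;
    } finally {
      if (!success) {
        // Delete any partially created file(s), swallowing secondary
        // exceptions so the original one propagates.
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // ignore
          }
        }
      }
    }
    return trackingDir.getCreatedFiles();
  }
}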

     
