Package org.apache.lucene.store

Examples of org.apache.lucene.store.TrackingDirectoryWrapper
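TrackingDirectoryWrapper is a Directory wrapper that records the name of every file created through it. Lucene's indexing code wraps the real Directory with it whenever it needs to know afterwards exactly which files a merge, flush, or update wrote, either to register them with a SegmentInfo or to delete them again if something failed.

Before the real call sites, here is a minimal self-contained sketch of that pattern. It assumes Lucene 4.x-era APIs (which the snippets below use); RAMDirectory and the file name "_demo.dat" are illustrative choices, not taken from the examples:

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public class TrackingDirectoryWrapperSketch {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    boolean success = false;
    try {
      // Write through the wrapper; it records "_demo.dat" as a created file.
      IndexOutput out = trackingDir.createOutput("_demo.dat", IOContext.DEFAULT);
      try {
        out.writeLong(42L);
      } finally {
        out.close();
      }
      success = true;
    } finally {
      if (!success) {
        // Roll back: delete whatever the wrapper saw being created,
        // mirroring the cleanup blocks in the snippets below.
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // ignore so only the first exception propagates
          }
        }
      }
    }
    System.out.println("created files: " + trackingDir.getCreatedFiles());
    dir.close();
  }
}

The first example below appears to come from IndexWriter's addIndexes(IndexReader...) path: a tracking wrapper is handed to SegmentMerger so that the merged segment's files can be attached to its SegmentInfo once the merge finishes.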


     
      final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));

      // TODO: somehow we should fix this merge so it's
      // abortable so that IW.close(false) is able to stop it
      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);

      SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1,
                                         false, codec, null);

      SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir, config.getTermIndexInterval(),
                                               MergeState.CheckAbort.NONE, globalFieldNumberMap,
                                               context, config.getCheckIntegrityAtMerge());
     
      if (!merger.shouldMerge()) {
        return;
      }

      MergeState mergeState;
      boolean success = false;
      try {
        mergeState = merger.merge();                // merge 'em
        success = true;
      } finally {
        if (!success) {
          synchronized(this) {
            deleter.refresh(info.name);
          }
        }
      }

      SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, -1L, -1L, -1L);

      info.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
      trackingDir.getCreatedFiles().clear();
                                        
      setDiagnostics(info, SOURCE_ADDINDEXES_READERS);

      boolean useCompoundFile;
      synchronized(this) { // Guard segmentInfos
        if (stopMerges) {
          deleter.deleteNewFiles(infoPerCommit.files());
          return;
        }
        ensureOpen();
        useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit, this);
      }

      // Now create the compound file if needed
      if (useCompoundFile) {
        Collection<String> filesToDelete = infoPerCommit.files();
        try {
          createCompoundFile(infoStream, directory, MergeState.CheckAbort.NONE, info, context);
        } finally {
          // delete new non-CFS files directly: they were never
          // registered with the IndexFileDeleter (IFD)
          synchronized(this) {
            deleter.deleteNewFiles(filesToDelete);
          }
        }
        info.setUseCompoundFile(true);
      }

      // Have codec write SegmentInfo.  Must do this after
      // creating CFS so that 1) .si isn't slurped into CFS,
      // and 2) .si reflects useCompoundFile=true change
      // above:
      success = false;
      try {
        codec.segmentInfoFormat().getSegmentInfoWriter().write(trackingDir, info, mergeState.fieldInfos, context);
        success = true;
      } finally {
        if (!success) {
          synchronized(this) {
            deleter.refresh(info.name);
          }
        }
      }

      info.addFiles(trackingDir.getCreatedFiles());

      // Register the new segment
      synchronized(this) {
        if (stopMerges) {
          deleter.deleteNewFiles(info.files());
          // ... (snippet truncated)


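This next snippet, apparently from IndexWriter's addIndexes(Directory...) segment-copy path, rewrites a segment's SI file through a tracking wrapper so the newly written .si file(s) can be collected into siFiles: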
    newInfo.setFiles(segFiles);

    // We must rewrite the SI file because it references
    // the segment name (its own name, if it's 3.x, and the doc
    // store segment name):
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
    final Codec currentCodec = newInfo.getCodec();
    try {
      currentCodec.segmentInfoFormat().getSegmentInfoWriter().write(trackingDir, newInfo, fis, context);
    } catch (UnsupportedOperationException uoe) {
      if (currentCodec instanceof Lucene3xCodec) {
        // OK: 3x codec cannot write a new SI file;
        // SegmentInfos will write this on commit
      } else {
        throw uoe;
      }
    }

    final Collection<String> siFiles = trackingDir.getCreatedFiles();

    boolean success = false;
    try {

      // Copy the segment's files
      // ... (snippet truncated)

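The following, longer example looks like IndexWriter's mergeMiddle: dirWrapper is threaded into SegmentMerger, and after the merge its created-files set becomes the new segment's file list: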
    List<SegmentCommitInfo> sourceSegments = merge.segments;
   
    IOContext context = new IOContext(merge.getMergeInfo());

    final MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
    final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);

    if (infoStream.isEnabled("IW")) {
      infoStream.message("IW", "merging " + segString(merge.segments));
    }

    merge.readers = new ArrayList<>();

    // This is try/finally to make sure merger's readers are
    // closed:
    boolean success = false;
    try {
      int segUpto = 0;
      while(segUpto < sourceSegments.size()) {

        final SegmentCommitInfo info = sourceSegments.get(segUpto);

        // Hold onto the "live" reader; we will use this to
        // commit merged deletes
        final ReadersAndUpdates rld = readerPool.get(info, true);

        // Carefully pull the most recent live docs and reader
        SegmentReader reader;
        final Bits liveDocs;
        final int delCount;

        synchronized (this) {
          // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
          // pendingDeleteCount and field updates while we pull a copy:
          reader = rld.getReaderForMerge(context);
          liveDocs = rld.getReadOnlyLiveDocs();
          delCount = rld.getPendingDeleteCount() + info.getDelCount();

          assert reader != null;
          assert rld.verifyDocCounts();

          if (infoStream.isEnabled("IW")) {
            if (rld.getPendingDeleteCount() != 0) {
              infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
            } else if (info.getDelCount() != 0) {
              infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
            } else {
              infoStream.message("IW", "seg=" + segString(info) + " no deletes");
            }
          }
        }

        // Deletes might have happened after we pulled the merge reader and
        // before we got a read-only copy of the segment's actual live docs
        // (taking pending deletes into account). In that case we need to
        // make a new reader with updated live docs and del count.
        if (reader.numDeletedDocs() != delCount) {
          // fix the reader's live docs and del count
          assert delCount > reader.numDeletedDocs(); // beware of zombies

          SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.info.getDocCount() - delCount);
          boolean released = false;
          try {
            rld.release(reader);
            released = true;
          } finally {
            if (!released) {
              newReader.decRef();
            }
          }

          reader = newReader;
        }

        merge.readers.add(reader);
        assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
        segUpto++;
      }

//      System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());
     
      // we pass merge.getMergeReaders() instead of merge.readers to allow the
      // OneMerge to return a view over the actual segments to merge
      final SegmentMerger merger = new SegmentMerger(merge.getMergeReaders(),
          merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(),
          checkAbort, globalFieldNumberMap,
          context, config.getCheckIntegrityAtMerge());

      merge.checkAborted(directory);

      // This is where all the work happens:
      MergeState mergeState;
      boolean success3 = false;
      try {
        if (!merger.shouldMerge()) {
          // would result in a 0 document segment: nothing to merge!
          mergeState = new MergeState(new ArrayList<AtomicReader>(), merge.info.info, infoStream, checkAbort);
        } else {
          mergeState = merger.merge();
        }
        success3 = true;
      } finally {
        if (!success3) {
          synchronized(this) { 
            deleter.refresh(merge.info.info.name);
          }
        }
      }
      assert mergeState.segmentInfo == merge.info.info;
      merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));

      // Record which codec was used to write the segment

      if (infoStream.isEnabled("IW")) {
        if (merge.info.info.getDocCount() == 0) {
          // ... (snippet truncated)

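Here, live docs are written through a tracking wrapper purely so that any partially created files can be deleted if the write throws: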
    assert liveDocs.length() == info.info.getDocCount();
   
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
   
    // We can write directly to the actual name (vs to a
    // .tmp & renaming it) because the file is not live
    // until segments file is written:
    boolean success = false;
    try {
      Codec codec = info.info.getCodec();
      codec.liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
      success = true;
    } finally {
      if (!success) {
        // Advance only the nextWriteDelGen so that a 2nd
        // attempt to write will write to a new file
        info.advanceNextWriteDelGen();
       
        // Delete any partially created file(s):
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // Ignore so we throw only the first exc
          }
          // ... (snippet truncated)

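The next two snippets (apparently from ReadersAndUpdates) write numeric and then binary doc-values updates to a new generation of files; the wrapper records which files belong to that generation: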
      final FieldInfo fieldInfo = infos.fieldInfo(field);
      assert fieldInfo != null;
      fieldInfo.setDocValuesGen(nextDocValuesGen);
      final FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { fieldInfo });
      // separately also track which files were created for this gen
      final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
      final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, termsIndexDivisor, null, updatesContext, segmentSuffix);
      try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
        // write the numeric updates to a new gen'd docvalues file
        fieldsConsumer.addNumericField(fieldInfo, new Iterable<Number>() {
          final NumericDocValues currentValues = reader.getNumericDocValues(field);
          final Bits docsWithField = reader.getDocsWithField(field);
          final int maxDoc = reader.maxDoc();
          final NumericDocValuesFieldUpdates.Iterator updatesIter = fieldUpdates.iterator();
          @Override
          public Iterator<Number> iterator() {
            updatesIter.reset();
            return new Iterator<Number>() {

              int curDoc = -1;
              int updateDoc = updatesIter.nextDoc();
             
              @Override
              public boolean hasNext() {
                return curDoc < maxDoc - 1;
              }

              @Override
              public Number next() {
                if (++curDoc >= maxDoc) {
                  throw new NoSuchElementException("no more documents to return values for");
                }
                if (curDoc == updateDoc) { // this document has an updated value
                  Long value = updatesIter.value(); // either null (unset value) or updated value
                  updateDoc = updatesIter.nextDoc(); // prepare for next round
                  return value;
                } else {
                  // no update for this document
                  assert curDoc < updateDoc;
                  if (currentValues != null && docsWithField.get(curDoc)) {
                    // only read the current value if the document had a value before
                    return currentValues.get(curDoc);
                  } else {
                    return null;
                  }
                }
              }

              @Override
              public void remove() {
                throw new UnsupportedOperationException("this iterator does not support removing elements");
              }
            };
          }
        });
      }
      info.advanceDocValuesGen();
      assert !fieldFiles.containsKey(fieldInfo.number);
      fieldFiles.put(fieldInfo.number, trackingDir.getCreatedFiles());
    }
  }

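The binary counterpart of the previous snippet; the structure is identical, only the value type differs: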
      final FieldInfo fieldInfo = infos.fieldInfo(field);
      assert fieldInfo != null;
      fieldInfo.setDocValuesGen(nextDocValuesGen);
      final FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { fieldInfo });
      // separately also track which files were created for this gen
      final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
      final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, termsIndexDivisor, null, updatesContext, segmentSuffix);
      try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
        // write the binary updates to a new gen'd docvalues file
        fieldsConsumer.addBinaryField(fieldInfo, new Iterable<BytesRef>() {
          final BinaryDocValues currentValues = reader.getBinaryDocValues(field);
          final Bits docsWithField = reader.getDocsWithField(field);
          final int maxDoc = reader.maxDoc();
          final BinaryDocValuesFieldUpdates.Iterator updatesIter = fieldUpdates.iterator();
          @Override
          public Iterator<BytesRef> iterator() {
            updatesIter.reset();
            return new Iterator<BytesRef>() {
             
              int curDoc = -1;
              int updateDoc = updatesIter.nextDoc();
             
              @Override
              public boolean hasNext() {
                return curDoc < maxDoc - 1;
              }
             
              @Override
              public BytesRef next() {
                if (++curDoc >= maxDoc) {
                  throw new NoSuchElementException("no more documents to return values for");
                }
                if (curDoc == updateDoc) { // this document has an updated value
                  BytesRef value = updatesIter.value(); // either null (unset value) or updated value
                  updateDoc = updatesIter.nextDoc(); // prepare for next round
                  return value;
                } else {
                  // no update for this document
                  assert curDoc < updateDoc;
                  if (currentValues != null && docsWithField.get(curDoc)) {
                    // only read the current value if the document had a value before
                    return currentValues.get(curDoc);
                  } else {
                    return null;
                  }
                }
              }
             
              @Override
              public void remove() {
                throw new UnsupportedOperationException("this iterator does not support removing elements");
              }
            };
          }
        });
      }
      info.advanceDocValuesGen();
      assert !fieldFiles.containsKey(fieldInfo.number);
      fieldFiles.put(fieldInfo.number, trackingDir.getCreatedFiles());
    }
  }

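A compact example: write a new field-infos generation and return exactly the files it created: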
    // HEADER + FOOTER: 40
    // 90 bytes per-field (overestimating long names and the attributes map)
    final long estInfosSize = 40 + 90 * fieldInfos.size();
    final IOContext infosContext = new IOContext(new FlushInfo(info.info.getDocCount(), estInfosSize));
    // separately also track which files were created for this gen
    final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    infosFormat.getFieldInfosWriter().write(trackingDir, info.info.name, segmentSuffix, fieldInfos, infosContext);
    info.advanceFieldInfosGen();
    return trackingDir.getCreatedFiles();
  }

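This snippet ties the previous helpers together (apparently ReadersAndUpdates.writeFieldUpdates) and, on failure, deletes every file the wrapper saw being created: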
    assert dvUpdates.any();
   
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
   
    final Map<Integer,Set<String>> newDVFiles = new HashMap<>();
    Set<String> fieldInfosFiles = null;
    FieldInfos fieldInfos = null;
    boolean success = false;
    try {
      final Codec codec = info.info.getCodec();

      // reader could be null e.g. for a just merged segment (from
      // IndexWriter.commitMergedDeletes).
      final SegmentReader reader = this.reader == null ? new SegmentReader(info, writer.getConfig().getReaderTermsIndexDivisor(), IOContext.READONCE) : this.reader;
      try {
        // clone FieldInfos so that we can update their dvGen separately from
        // the reader's infos and write them to a new fieldInfos_gen file
        FieldInfos.Builder builder = new FieldInfos.Builder(writer.globalFieldNumberMap);
        // cannot use builder.add(reader.getFieldInfos()) because it does not
        // clone FI.attributes as well as FI.dvGen
        for (FieldInfo fi : reader.getFieldInfos()) {
          FieldInfo clone = builder.add(fi);
          // copy the stuff FieldInfos.Builder doesn't copy
          if (fi.attributes() != null) {
            for (Entry<String,String> e : fi.attributes().entrySet()) {
              clone.putAttribute(e.getKey(), e.getValue());
            }
          }
          clone.setDocValuesGen(fi.getDocValuesGen());
        }
        // create new fields or update existing ones to have NumericDV type
        for (String f : dvUpdates.numericDVUpdates.keySet()) {
          builder.addOrUpdate(f, NumericDocValuesField.TYPE);
        }
        // create new fields or update existing ones to have BinaryDV type
        for (String f : dvUpdates.binaryDVUpdates.keySet()) {
          builder.addOrUpdate(f, BinaryDocValuesField.TYPE);
        }
       
        fieldInfos = builder.finish();
        final DocValuesFormat docValuesFormat = codec.docValuesFormat();
       
//          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeFieldUpdates: applying numeric updates; seg=" + info + " updates=" + numericFieldUpdates);
        handleNumericDVUpdates(fieldInfos, dvUpdates.numericDVUpdates, trackingDir, docValuesFormat, reader, newDVFiles);
       
//        System.out.println("[" + Thread.currentThread().getName() + "] RAU.writeFieldUpdates: applying binary updates; seg=" + info + " updates=" + dvUpdates.binaryDVUpdates);
        handleBinaryDVUpdates(fieldInfos, dvUpdates.binaryDVUpdates, trackingDir, docValuesFormat, reader, newDVFiles);

//        System.out.println("[" + Thread.currentThread().getName() + "] RAU.writeFieldUpdates: write fieldInfos; seg=" + info);
        fieldInfosFiles = writeFieldInfosGen(fieldInfos, trackingDir, docValuesFormat, codec.fieldInfosFormat());
      } finally {
        if (reader != this.reader) {
//          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeLiveDocs: closeReader " + reader);
          reader.close();
        }
      }
   
      success = true;
    } finally {
      if (!success) {
        // Advance only the nextWriteFieldInfosGen and nextWriteDocValuesGen, so
        // that a 2nd attempt to write will write to a new file
        info.advanceNextWriteFieldInfosGen();
        info.advanceNextWriteDocValuesGen();
       
        // Delete any partially created file(s):
        for (String fileName : trackingDir.getCreatedFiles()) {
          try {
            dir.deleteFile(fileName);
          } catch (Throwable t) {
            // Ignore so we throw only the first exc
          }
          // ... (snippet truncated)

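A test-code example: two segments are merged with SegmentMerger, and the wrapper's created-files set populates the new SegmentInfo: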
      IOContext context = newIOContext(random());
      SegmentReader r1 = new SegmentReader(si1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
      SegmentReader r2 = new SegmentReader(si2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);

      final Codec codec = Codec.getDefault();
      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
      final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, -1, false, codec, null);

      SegmentMerger merger = new SegmentMerger(Arrays.<AtomicReader>asList(r1, r2),
          si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
          MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), context, true);

      MergeState mergeState = merger.merge();
      r1.close();
      r2.close();
      final SegmentInfo info = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged,
                                               si1.info.getDocCount() + si2.info.getDocCount(),
                                               false, codec, null);
      info.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
     
      if (useCompoundFile) {
        Collection<String> filesToDelete = IndexWriter.createCompoundFile(InfoStream.getDefault(), dir, MergeState.CheckAbort.NONE, info, newIOContext(random()));
        info.setUseCompoundFile(true);
        for (final String fileToDelete : filesToDelete) {
          // ... (snippet truncated)

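Finally, another variant of the live-docs write with the same create-then-roll-back pattern: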
      assert liveDocs.length() == info.info.getDocCount();

      // Do this so we can delete any created files on
      // exception; this saves all codecs from having to do
      // it:
      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);

      // We can write directly to the actual name (vs to a
      // .tmp & renaming it) because the file is not live
      // until segments file is written:
      boolean success = false;
      try {
        info.info.getCodec().liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
        success = true;
      } finally {
        if (!success) {
          // Advance only the nextWriteDelGen so that a 2nd
          // attempt to write will write to a new file
          info.advanceNextWriteDelGen();

          // Delete any partially created file(s):
          for(String fileName : trackingDir.getCreatedFiles()) {
            try {
              dir.deleteFile(fileName);
            } catch (Throwable t) {
              // Ignore so we throw only the first exc
            }
            // ... (snippet truncated)
