Package org.apache.lucene.util

Examples of org.apache.lucene.util.Bits
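Bits is Lucene's minimal random-access bit-vector interface: boolean get(int index) plus int length(). Its most common role is as the "live docs" view of a segment, where a set bit means the document is live and a clear bit means it was deleted; by convention a null Bits means "no deletions at all". A minimal sketch of the two usual shapes, assuming Lucene 4.x, where FixedBitSet implements Bits:

  import org.apache.lucene.util.Bits;
  import org.apache.lucene.util.FixedBitSet;

  public class BitsExamples {
    public static void main(String[] args) {
      // 1) FixedBitSet implements Bits directly:
      FixedBitSet set = new FixedBitSet(8);
      set.set(3);
      Bits bits = set;
      System.out.println(bits.get(3) + " " + bits.length()); // true 8

      // 2) A custom view, e.g. "every even doc is live":
      Bits evenDocs = new Bits() {
        @Override public boolean get(int index) { return (index & 1) == 0; }
        @Override public int length() { return 8; }
      };
      System.out.println(evenDocs.get(2)); // true
    }
  }

The examples below, collected from the Lucene sources, show Bits in its usual habitats: filtering postings enums, caching docs-with-field sets, gating merges, and backing Filter random access.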


    CategoryPath catPath = fresNode.label;

    Term drillDownTerm = DrillDownQuery.term(searchParams.indexingParams, catPath);
    // TODO (Facet): avoid Multi*?
    Bits liveDocs = MultiFields.getLiveDocs(indexReader);
    int updatedCount = countIntersection(MultiFields.getTermDocsEnum(indexReader, liveDocs,
                                                                     drillDownTerm.field(), drillDownTerm.bytes(),
                                                                     0), docIds.iterator());
    fresNode.value = updatedCount;
  }
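This facet snippet refreshes a node's count by intersecting the postings of the category's drill-down term with a fixed set of doc IDs, passing the reader's live docs into MultiFields.getTermDocsEnum so deleted documents never contribute. The countIntersection helper itself isn't shown; a plausible sketch, under the assumption that both inputs enumerate doc IDs in increasing order:

  // Hypothetical sketch of countIntersection (the real signature is not shown above).
  private static int countIntersection(DocsEnum docs, Iterator<Integer> docIds) throws IOException {
    if (docs == null) {
      return 0; // term has no postings
    }
    int count = 0;
    int doc = docs.nextDoc();
    while (doc != DocIdSetIterator.NO_MORE_DOCS && docIds.hasNext()) {
      int target = docIds.next();
      if (target > doc) {
        doc = docs.advance(target); // leapfrog the postings to the candidate
      }
      if (doc == target) {
        count++;
        doc = docs.nextDoc();
      }
      // if target < doc, simply pull the next candidate
    }
    return count;
  }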


    return bits; // tail of the helper that builds the random live-docs Bits
  }

  @Test
  public void testDocsEnum() throws Exception {
    Bits mappedLiveDocs = randomLiveDocs(reader.maxDoc());
    TermsEnum termsEnum = reader.terms(DOCS_ENUM_FIELD).iterator(null);
    assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOCS_ENUM_TERM)));
    DocsEnum docs = termsEnum.docs(mappedLiveDocs, null);

    int doc;
    int prev = -1;
    while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(reader.document(doc).get(ID_FIELD)));
      while (++prev < doc) {
        assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
      }
    }
    while (++prev < reader.maxDoc()) {
      assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
    }

    DocsEnum reuse = docs;
    docs = termsEnum.docs(mappedLiveDocs, reuse);
    if (docs instanceof SortingDocsEnum) {
      assertTrue(((SortingDocsEnum) docs).reused(reuse)); // make sure reuse worked
    }
    doc = -1;
    prev = -1;
    while ((doc = docs.advance(doc + 1)) != DocIdSetIterator.NO_MORE_DOCS) {
      assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(reader.document(doc).get(ID_FIELD)));
      while (++prev < doc) {
        assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
      }
    }
    while (++prev < reader.maxDoc()) {
      assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
    }
  }
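The second pass of this test exercises the reuse contract of TermsEnum.docs(Bits, DocsEnum): handing back a previously returned enum lets the codec recycle its buffers instead of allocating a fresh enum per term, and SortingDocsEnum.reused(...) verifies the recycling actually happened. The idiom in isolation (Lucene 4.x API):

  DocsEnum de = null;
  BytesRef term;
  while ((term = termsEnum.next()) != null) {
    de = termsEnum.docs(liveDocs, de); // pass the previous enum back in for reuse
    for (int doc = de.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = de.nextDoc()) {
      // consume postings of `term` over live documents only ...
    }
  }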

  private Bits getMissingBits(int fieldNumber, final long offset, final long length) throws IOException {
    if (offset == -1) {
      return new Bits.MatchAllBits(maxDoc);
    } else {
      Bits instance;
      synchronized(this) {
        instance = docsWithFieldInstances.get(fieldNumber);
        if (instance == null) {
          IndexInput data = this.data.clone();
          data.seek(offset);
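The snippet is cut off right after the seek, but its shape is clear: a per-field, lazily built and synchronized cache of "which documents have a value for this field" bits, with offset == -1 as the sentinel for "every document has a value". A plausible completion, assuming the bits are stored on disk as ceil(maxDoc/64) packed longs at offset (the layout Lucene 4.5's docvalues producer uses):

          assert length % 8 == 0;
          long[] bits = new long[(int) (length >> 3)];
          for (int i = 0; i < bits.length; i++) {
            bits[i] = data.readLong();
          }
          instance = new FixedBitSet(bits, maxDoc); // FixedBitSet implements Bits
          docsWithFieldInstances.put(fieldNumber, instance);
        }
      }
      return instance;
    }
  }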

 

              @Override
              public Bits bits() throws IOException {
                if (nullBitset) {
                  return null;
                }
                return new Bits() {
                 
                  @Override
                  public boolean get(int index) {
                    assertTrue("filter was called for a non-matching doc",
                        bitSet.get(index));
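Here Bits backs the random-access path of a Filter: DocIdSet.bits() may return null to declare "no random access available, consume me through iterator() instead", and the test's anonymous Bits asserts it is only ever consulted for documents the filter really matches. In Lucene 4.x FixedBitSet extends DocIdSet and implements Bits, so a bitset-backed filter can expose both views; a sketch:

  final FixedBitSet matching = new FixedBitSet(maxDoc);
  // ... set a bit for every matching doc ...
  DocIdSet docIdSet = new DocIdSet() {
    @Override
    public DocIdSetIterator iterator() {
      return matching.iterator();
    }
    @Override
    public Bits bits() {
      return matching; // non-null: callers may use random access
    }
  };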

          matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
        }
      }

      final int maxDoc = reader.maxDoc();
      final Bits liveDocs = reader.getLiveDocs();

      if (matchingVectorsReader == null
          || matchingVectorsReader.getCompressionMode() != compressionMode
          || matchingVectorsReader.getChunkSize() != chunkSize
          || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
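This term-vectors merge first checks whether the incoming segment was written with identical codec parameters (same compression mode, chunk size, packed-ints version); only then can compressed chunks be copied in bulk. Otherwise the writer falls back to a document-by-document merge in which liveDocs decides which documents survive. Schematically (a sketch, not the writer's exact fallback):

      for (int i = 0; i < maxDoc; i++) {
        if (liveDocs != null && !liveDocs.get(i)) {
          continue; // deleted: drop this document from the merged segment
        }
        // read document i's term vectors from `reader` and re-write them ...
      }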

    @Override
    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
        boolean topScorer, Bits acceptDocs) throws IOException {
      assert !terms.isEmpty();
      final AtomicReader reader = context.reader();
      final Bits liveDocs = acceptDocs;
      PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];

      final Terms fieldTerms = reader.terms(field);
      if (fieldTerms == null) {
        return null;
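Within a Weight, acceptDocs is the deletion-aware filter the scorer must honor; this phrase scorer simply aliases it to liveDocs and later hands it to every postings enum it pulls. The typical shape, sketched against the 4.x API:

      TermsEnum te = fieldTerms.iterator(null);
      for (Term t : terms) {
        if (te.seekCeil(t.bytes()) != TermsEnum.SeekStatus.FOUND) {
          return null; // a required term is absent from this segment: no matches
        }
        DocsEnum de = te.docs(liveDocs, null); // liveDocs filters out deleted docs
        // ... wrap `de` into the scorer being built ...
      }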

      assert liveDocs != null;
    }
   
    @Override
    public final int nextDoc() throws IOException {
      final Bits liveDocs = this.liveDocs;
      for (int i = start+1; i < count; i++) {
        int d = docs[i];
        if (liveDocs.get(d)) {
          start = i;
          freq = freqs[i];
          return doc = d;
        }
      }
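This enum iterates postings that were buffered into parallel docs[]/freqs[] arrays, probing liveDocs for every candidate so deleted documents are silently skipped. Copying the field into a local (final Bits liveDocs = this.liveDocs;) is a deliberate hot-loop idiom that helps the JIT keep the reference in a register, and the constructor's assert liveDocs != null is what licenses the unguarded liveDocs.get(d). A null-tolerant variant of the same scan would read:

      for (int i = start + 1; i < count; i++) {
        int d = docs[i];
        if (liveDocs == null || liveDocs.get(d)) { // null means "no deletions"
          start = i;
          freq = freqs[i];
          return doc = d;
        }
      }
      return doc = DocIdSetIterator.NO_MORE_DOCS;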

   
    for (int i = 0; i < sourceSegments.size(); i++) {
      SegmentCommitInfo info = sourceSegments.get(i);
      minGen = Math.min(info.getBufferedDeletesGen(), minGen);
      final int docCount = info.info.getDocCount();
      final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
      final ReadersAndUpdates rld = readerPool.get(info, false);
      // We hold a ref so it should still be in the pool:
      assert rld != null: "seg=" + info.info.name;
      final Bits currentLiveDocs = rld.getLiveDocs();
      final Map<String,NumericFieldUpdates> mergingFieldUpdates = rld.getMergingFieldUpdates();
      final String[] mergingFields;
      final UpdatesIterator[] updatesIters;
      if (mergingFieldUpdates.isEmpty()) {
        mergingFields = null;
        updatesIters = null;
      } else {
        mergingFields = new String[mergingFieldUpdates.size()];
        updatesIters = new UpdatesIterator[mergingFieldUpdates.size()];
        int idx = 0;
        for (Entry<String,NumericFieldUpdates> e : mergingFieldUpdates.entrySet()) {
          mergingFields[idx] = e.getKey();
          updatesIters[idx] = e.getValue().getUpdates();
          updatesIters[idx].nextDoc(); // advance to first update doc
          ++idx;
        }
      }
//      System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: info=" + info + ", mergingUpdates=" + mergingUpdates);

      if (prevLiveDocs != null) {

        // If we had deletions on starting the merge we must
        // still have deletions now:
        assert currentLiveDocs != null;
        assert prevLiveDocs.length() == docCount;
        assert currentLiveDocs.length() == docCount;

        // There were deletes on this segment when the merge
        // started.  The merge has collapsed away those
        // deletes, but, if new deletes were flushed since
        // the merge started, we must now carefully keep any
        // newly flushed deletes but mapping them to the new
        // docIDs.

        // Since we copy-on-write, if any new deletes were
        // applied after merging has started, we can just
        // check if the before/after liveDocs have changed.
        // If so, we must carefully merge the liveDocs one
        // doc at a time:
        if (currentLiveDocs != prevLiveDocs) {
          // This means this segment received new deletes
          // since we started the merge, so we
          // must merge them:
          for (int j = 0; j < docCount; j++) {
            if (!prevLiveDocs.get(j)) {
              assert !currentLiveDocs.get(j);
            } else {
              if (!currentLiveDocs.get(j)) {
                if (mergedDeletesAndUpdates == null) {
                  mergedDeletesAndUpdates = readerPool.get(merge.info, true);
                  mergedDeletesAndUpdates.initWritableLiveDocs();
                  initWritableLiveDocs = true;
                  docMap = getDocMap(merge, mergeState);
                } else if (!initWritableLiveDocs) { // mergedDeletes was initialized by field-updates changes
                  mergedDeletesAndUpdates.initWritableLiveDocs();
                  initWritableLiveDocs = true;
                }
                mergedDeletesAndUpdates.delete(docMap.map(docUpto));
                if (mergingFields != null) { // advance all iters beyond the deleted document
                  skipDeletedDoc(updatesIters, j);
                }
              } else if (mergingFields != null) {
                // document isn't deleted, check if any of the fields have an update to it
                int newDoc = -1;
                for (int idx = 0; idx < mergingFields.length; idx++) {
                  UpdatesIterator updatesIter = updatesIters[idx];
                  if (updatesIter.doc() == j) { // document has an update
                    if (mergedDeletesAndUpdates == null) {
                      mergedDeletesAndUpdates = readerPool.get(merge.info, true);
                      docMap = getDocMap(merge, mergeState);
                    }
                    if (newDoc == -1) { // map once per all field updates, but only if there are any updates
                      newDoc = docMap.map(docUpto);
                    }
                    String field = mergingFields[idx];
                    NumericFieldUpdates fieldUpdates = mergedFieldUpdates.get(field);
                    if (fieldUpdates == null) {
                      // an approximation of maxDoc, used to compute best bitsPerValue
                      fieldUpdates = new NumericFieldUpdates.PackedNumericFieldUpdates(mergeState.segmentInfo.getDocCount());
                      mergedFieldUpdates.put(field, fieldUpdates);
                    }
                    fieldUpdates.add(newDoc, updatesIter.value() == null ? NumericUpdate.MISSING : updatesIter.value());
                    updatesIter.nextDoc(); // advance to next document
                  } else {
                    assert updatesIter.doc() > j : "updateDoc=" + updatesIter.doc() + " curDoc=" + j;
                  }
                }
              }
              docUpto++;
            }
          }
        } else if (mergingFields != null) {
          // need to check each non-deleted document if it has any updates
          for (int j = 0; j < docCount; j++) {
            if (prevLiveDocs.get(j)) {
              // document isn't deleted, check if any of the fields have an update to it
              int newDoc = -1;
              for (int idx = 0; idx < mergingFields.length; idx++) {
                UpdatesIterator updatesIter = updatesIters[idx];
                if (updatesIter.doc() == j) { // document has an update
                  if (mergedDeletesAndUpdates == null) {
                    mergedDeletesAndUpdates = readerPool.get(merge.info, true);
                    docMap = getDocMap(merge, mergeState);
                  }
                  if (newDoc == -1) { // map once per all field updates, but only if there are any updates
                    newDoc = docMap.map(docUpto);
                  }
                  String field = mergingFields[idx];
                  NumericFieldUpdates fieldUpdates = mergedFieldUpdates.get(field);
                  if (fieldUpdates == null) {
                    // an approximation of maxDoc, used to compute best bitsPerValue
                    fieldUpdates = new NumericFieldUpdates.PackedNumericFieldUpdates(mergeState.segmentInfo.getDocCount());
                    mergedFieldUpdates.put(field, fieldUpdates);
                  }
                  fieldUpdates.add(newDoc, updatesIter.value() == null ? NumericUpdate.MISSING : updatesIter.value());
                  updatesIter.nextDoc(); // advance to next document
                } else {
                  assert updatesIter.doc() > j : "updateDoc=" + updatesIter.doc() + " curDoc=" + j;
                }
              }
              // advance docUpto for every non-deleted document
              docUpto++;
            } else {
              // advance all iters beyond the deleted document
              skipDeletedDoc(updatesIters, j);
            }
          }
        } else {
          docUpto += info.info.getDocCount() - info.getDelCount() - rld.getPendingDeleteCount();
        }
      } else if (currentLiveDocs != null) {
        assert currentLiveDocs.length() == docCount;
        // This segment had no deletes before but now it
        // does:
        for (int j = 0; j < docCount; j++) {
          if (!currentLiveDocs.get(j)) {
            if (mergedDeletesAndUpdates == null) {
              mergedDeletesAndUpdates = readerPool.get(merge.info, true);
              mergedDeletesAndUpdates.initWritableLiveDocs();
              initWritableLiveDocs = true;
              docMap = getDocMap(merge, mergeState);
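All of the bookkeeping above leans on a copy-on-write contract: a published liveDocs instance is never mutated in place, so the plain reference comparison currentLiveDocs != prevLiveDocs is sufficient to detect that deletes arrived while the merge ran. Only when the references differ does the code walk both bit sets document by document, remapping each newly deleted doc through the merge's DocMap. The detection step in isolation (sketch):

      if (currentLiveDocs != prevLiveDocs) { // identity check, thanks to copy-on-write
        for (int j = 0; j < docCount; j++) {
          if (prevLiveDocs.get(j) && !currentLiveDocs.get(j)) {
            // doc j was deleted after the merge started: carry the delete
            // forward by mapping j's merged position through the doc map
          }
        }
      }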

    @Override
    protected final int linearScan(int scanTo) throws IOException {
      final int[] docs = this.docs;
      final int upTo = count;
      final Bits liveDocs = this.liveDocs;
      for (int i = start; i < upTo; i++) {
        int d = docs[i];
        if (scanTo <= d && liveDocs.get(d)) {
          start = i;
          freq = freqs[i];
          return doc = docs[i];
        }
      }
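linearScan is the companion of nextDoc above: it serves the advance(target) contract, returning the first live document at or beyond scanTo. Real codecs only scan linearly over short hops and consult skip lists for long jumps, but inside a small buffered block like this a straight scan is the right tool. From the caller's side:

    // advance() per the DocIdSetIterator contract: first live doc >= target.
    int doc = docsEnum.advance(42);
    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
      // no live document at or after 42 for this term
    }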
