Package org.apache.lucene.index

Examples of org.apache.lucene.index.Fields
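Fields is Lucene 4.x's entry point into the inverted index: it iterates field names, and maps each name to a Terms instance, which in turn hands out a TermsEnum over that field's terms. The same abstraction also fronts per-document term vectors (via IndexReader.getTermVectors). The snippets below are drawn from the Lucene code base; as orientation, here is a minimal sketch of the canonical traversal, assuming an open 4.x IndexReader named reader (MultiFields, Terms, TermsEnum from org.apache.lucene.index, BytesRef from org.apache.lucene.util):

    Fields fields = MultiFields.getFields(reader); // null if the index has no postings
    if (fields != null) {
      for (String field : fields) {
        Terms terms = fields.terms(field);
        if (terms == null) {
          continue;
        }
        TermsEnum termsEnum = terms.iterator(null);
        BytesRef term;
        while ((term = termsEnum.next()) != null) {
          // docFreq() = number of documents containing the current term
          System.out.println(field + ":" + term.utf8ToString() + " df=" + termsEnum.docFreq());
        }
      }
    }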


From a sorting filter reader (Lucene's SortingAtomicReader): fields() wraps the delegate's Fields so that postings come back in the sorted document order.

  @Override
  public Fields fields() throws IOException {
    Fields fields = in.fields();
    if (fields == null) {
      return null;
    } else {
      return new SortingFields(fields, in.getFieldInfos(), docMap);
    }
  }


From a MemoryIndex test: duellReaders checks that a MemoryIndex reader and a conventional IndexWriter-built reader expose identical Fields.

  private void duellReaders(CompositeReader other, AtomicReader memIndexReader)
      throws IOException {
    AtomicReader competitor = new SlowCompositeReaderWrapper(other);
    Fields memFields = memIndexReader.fields();
    for (String field : competitor.fields()) {
      Terms memTerms = memFields.terms(field);
      Terms iwTerms = competitor.terms(field);
      if (iwTerms == null) {
        assertNull(memTerms);
      } else {
        NumericDocValues normValues = competitor.getNormValues(field);
        // ... (compares terms, postings and norms; see the sketch below)
      }
    }
  }
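The comparison typically continues by stepping the two readers' TermsEnums in lock step. A sketch of that pattern (not the original test body; assumes JUnit's assertEquals/assertNull are in scope):

        TermsEnum iwEnum = iwTerms.iterator(null);
        TermsEnum memEnum = memTerms.iterator(null);
        BytesRef iwTerm;
        while ((iwTerm = iwEnum.next()) != null) {
          // both enums must visit the same terms in the same (byte) order
          assertEquals(iwTerm, memEnum.next());
          assertEquals(iwEnum.docFreq(), memEnum.docFreq());
        }
        assertNull(memEnum.next()); // and the memory index must have no extra terms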

From Lucene40TermVectorsReader: get(docID) materializes the per-document Fields over that document's term vectors, returning null when the document has none.

  @Override
  public Fields get(int docID) throws IOException {
    if (tvx != null) {
      Fields fields = new TVFields(docID);
      if (fields.size() == 0) {
        // TODO: we can improve writer here, eg write 0 into
        // tvx file, so we know on first read from tvx that
        // this doc has no TVs
        return null;
      } else {
        return fields;
      }
    } else {
      return null;
    }
  }

From a benchmark test: MultiFields.getFields exposes a merged Fields view across all segments, which the test walks to recount tokens.

    IndexReader reader = DirectoryReader.open(benchmark.getRunData().getDirectory());
    assertEquals(NUM_DOCS, reader.numDocs());

    int totalTokenCount2 = 0;

    Fields fields = MultiFields.getFields(reader);

    for (String fieldName : fields) {
      if (fieldName.equals(DocMaker.ID_FIELD)
          || fieldName.equals(DocMaker.DATE_MSEC_FIELD)
          || fieldName.equals(DocMaker.TIME_SEC_FIELD)) {
        continue;
      }
      Terms terms = fields.terms(fieldName);
      if (terms == null) {
        continue;
      }
      TermsEnum termsEnum = terms.iterator(null);
      DocsEnum docs = null;
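The elided remainder of the loop walks each term's postings and sums the frequencies. One plausible completion, reusing the termsEnum and docs variables above (DocsEnum.FLAG_FREQS asks the codec to decode frequencies; DocIdSetIterator is from org.apache.lucene.search):

      while (termsEnum.next() != null) {
        docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
        while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          totalTokenCount2 += docs.freq();
        }
      }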

From another benchmark test: reader.getTermVectors(0) returns the Fields view over the first document's term vectors; a non-null, non-empty result proves that vectors were indexed.

    assertEquals(3, ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor());
    assertFalse(((LogMergePolicy) writer.getConfig().getMergePolicy()).getUseCompoundFile());
    writer.close();
    Directory dir = benchmark.getRunData().getDirectory();
    IndexReader reader = DirectoryReader.open(dir);
    Fields tfv = reader.getTermVectors(0);
    assertNotNull(tfv);
    assertTrue(tfv.size() > 0);
    reader.close();
  }
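The returned Fields is just the per-document view, so it can be walked like any other. A short sketch of what could be inserted before reader.close() above (the printing is illustrative):

    for (String field : tfv) {
      Terms terms = tfv.terms(field);
      // size() is the number of terms in this field's vector (-1 if unknown)
      System.out.println(field + ": " + terms.size() + " terms");
    }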

From the highlighter's TokenSources: prefer a stored term vector for the field, and fall back to re-analyzing the stored document when none exists.

  public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
      String field, Document doc, Analyzer analyzer) throws IOException {
    TokenStream ts = null;

    Fields vectors = reader.getTermVectors(docId);
    if (vectors != null) {
      Terms vector = vectors.terms(field);
      if (vector != null) {
        ts = getTokenStream(vector);
      }
    }

    // No token info stored so fall back to analyzing raw content
    if (ts == null) {
      ts = getTokenStream(doc, field, analyzer);
    }
    return ts;
  }

An overload of the same method that does not take the Document; the fallback re-fetches the content through the reader instead.

  public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
      String field, Analyzer analyzer) throws IOException {
    TokenStream ts = null;

    Fields vectors = reader.getTermVectors(docId);
    if (vectors != null) {
      Terms vector = vectors.terms(field);
      if (vector != null) {
        ts = getTokenStream(vector);
      }
    }

    // No token info stored so fall back to analyzing raw content
    if (ts == null) {
      ts = getTokenStream(reader, docId, field, analyzer);
    }
    return ts;
  }
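Both overloads treat the stored term vector as a cache of the analysis result: the vector is preferred because it avoids re-tokenizing, and the analyzer is only consulted when the field was indexed without vectors.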

Also from TokenSources: a stricter variant that only succeeds when the vector stores both positions and offsets.

  /**
   * Returns a token stream rebuilt from the document's term vector, or null if
   * the field has no vector or the vector lacks positions or offsets.
   *
   * @throws IOException If there is a low-level I/O error
   */
  public static TokenStream getTokenStreamWithOffsets(IndexReader reader, int docId,
                                                      String field) throws IOException {

    Fields vectors = reader.getTermVectors(docId);
    if (vectors == null) {
      return null;
    }

    Terms vector = vectors.terms(field);
    if (vector == null) {
      return null;
    }

    if (!vector.hasPositions() || !vector.hasOffsets()) {
      return null;
    }

    return getTokenStream(vector);
  }
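Once positions and offsets are known to be present, they can be replayed through a DocsAndPositionsEnum. A minimal sketch, assuming vector is the Terms obtained above:

    TermsEnum termsEnum = vector.iterator(null);
    DocsAndPositionsEnum dpEnum = null;
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      dpEnum = termsEnum.docsAndPositions(null, dpEnum);
      dpEnum.nextDoc(); // a term vector holds exactly one (virtual) document
      final int freq = dpEnum.freq();
      for (int i = 0; i < freq; i++) {
        int pos = dpEnum.nextPosition();
        // startOffset()/endOffset() are safe because hasOffsets() was checked
        System.out.println(term.utf8ToString() + " pos=" + pos + " ["
            + dpEnum.startOffset() + "," + dpEnum.endOffset() + ")");
      }
    }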

From a compressing term vectors writer's merge: when the matching reader shares the same format and settings, whole compressed chunks are copied without decompression; otherwise each live document's Fields is re-added one by one.

      if (matchingVectorsReader == null // (reconstructed head of the truncated condition)
          || matchingVectorsReader.getCompressionMode() != compressionMode
          || matchingVectorsReader.getChunkSize() != chunkSize
          || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
        // naive merge: fetch each live doc's vectors and re-add them
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
          final Fields vectors = reader.getTermVectors(i);
          addAllDocVectors(vectors, mergeState);
          ++docCount;
          mergeState.checkAbort.work(300);
        }
      } else {
        final CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
        final IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
          if (pendingDocs.isEmpty()
              && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
            final long startPointer = index.getStartPointer(i);
            vectorsStream.seek(startPointer);
            final int docBase = vectorsStream.readVInt();
            final int chunkDocs = vectorsStream.readVInt();
            assert docBase + chunkDocs <= matchingSegmentReader.maxDoc();
            if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
                && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
              // no deletions in this chunk: copy the compressed bytes as-is
              final long chunkEnd = index.getStartPointer(docBase + chunkDocs);
              final long chunkLength = chunkEnd - vectorsStream.getFilePointer();
              indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
              this.vectorsStream.writeVInt(docCount);
              this.vectorsStream.writeVInt(chunkDocs);
              this.vectorsStream.copyBytes(vectorsStream, chunkLength);
              docCount += chunkDocs;
              this.numDocs += chunkDocs;
              mergeState.checkAbort.work(300 * chunkDocs);
              i = nextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
            } else {
              // chunk contains deletions: fall back to doc-by-doc re-add
              for (; i < docBase + chunkDocs; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
                final Fields vectors = reader.getTermVectors(i);
                addAllDocVectors(vectors, mergeState);
                ++docCount;
                mergeState.checkAbort.work(300);
              }
            }
          } else {
            final Fields vectors = reader.getTermVectors(i);
            addAllDocVectors(vectors, mergeState);
            ++docCount;
            mergeState.checkAbort.work(300);
            i = nextLiveDoc(i + 1, liveDocs, maxDoc);
          }
        }
      }

From a naive term vectors merge loop (the loop header and deleted-docs guard are reconstructed; the original snippet begins at the continue): each live document's vectors are fetched as a Fields and re-added.

      for (int docNum = 0; docNum < maxDoc; docNum++) {
        if (liveDocs != null && !liveDocs.get(docNum)) {
          // skip deleted docs (reconstructed guard)
          continue;
        }

        // NOTE: it's very important to first assign to vectors then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        Fields vectors = reader.getTermVectors(docNum);
        addAllDocVectors(vectors, mergeState);
        totalNumDocs++;
        mergeState.checkAbort.work(300);
      }
    }
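The NOTE above refers to LUCENE-1282, a JVM hotspot (JIT) bug that corrupted merges when the getTermVectors call was inlined directly into the addAllDocVectors argument list; assigning to a local variable first sidesteps the miscompilation.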
