Package org.apache.lucene.index

Examples of org.apache.lucene.index.Fields
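org.apache.lucene.index.Fields is the flex API's entry point into the inverted index: it is an Iterable<String> over field names, and its terms(String) method returns the Terms for one field, or null if that field has no postings. The excerpts below show how real projects obtain a Fields instance, either per segment via AtomicReader.fields() or per document via a term vectors reader's get(int docID), and how they drill down through Terms, TermsEnum, and DocsEnum. They are written against the Lucene 4.x API; later releases renamed AtomicReader to LeafReader and replaced DocsEnum with PostingsEnum.

A minimal sketch of the common iteration pattern, assuming only that a Directory named directory is in scope (the println is a placeholder for real per-term work):

      try (DirectoryReader reader = DirectoryReader.open(directory)) {
        for (AtomicReaderContext context : reader.leaves()) {
          AtomicReader atomicReader = context.reader();
          Fields fields = atomicReader.fields();  // null if the segment has no postings
          if (fields == null) {
            continue;
          }
          for (String field : fields) {
            Terms terms = fields.terms(field);    // null if this field has no postings
            if (terms == null) {
              continue;
            }
            TermsEnum termsEnum = terms.iterator(null);
            BytesRef term;
            while ((term = termsEnum.next()) != null) {
              System.out.println(field + " -> " + term.utf8ToString());
            }
          }
        }
      }

The first excerpt, from Apache Blur, either reloads previously written sampling data from the directory or enumerates a segment's fields to record which postings features (positions, offsets, payloads) each one carries: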


      if (directory.fileExists(fileName)) {
        IndexInput input = directory.openInput(fileName, IOContext.READONCE);
        segmentTraces = read(input);
        input.close();
      } else {
        Fields fields = atomicReader.fields();
        for (String field : fields) {
          LOG.debug("Context [{1}] sampling field [{0}].", field, context);
          Terms terms = fields.terms(field);
          // NOTE: terms can be null for a field with no postings; the
          // original code assumes every enumerated field has terms.
          boolean hasOffsets = terms.hasOffsets();
          boolean hasPayloads = terms.hasPayloads();
          boolean hasPositions = terms.hasPositions();

          tracer.initTrace(segmentReader, field, hasPositions, hasPayloads, hasOffsets);
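In this excerpt, also from Apache Blur, a TermsEnum walks every term of the ROW_ID field in each segment, copying the raw term bytes into a reusable Hadoop Text key (the surrounding code, not shown, appears to route each row id to its shard):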


      Text key = new Text();
      int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
      int shardId = BlurUtil.getShardIndex(shard);
      for (AtomicReaderContext context : leaves) {
        AtomicReader atomicReader = context.reader();
        Fields fields = atomicReader.fields();
        Terms terms = fields.terms(BlurConstants.ROW_ID);
        if (terms != null) {
          TermsEnum termsEnum = terms.iterator(null);
          BytesRef ref = null;
          while ((ref = termsEnum.next()) != null) {
            key.set(ref.bytes, ref.offset, ref.length);
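The following override, from an exitable-reader wrapper, decorates the Fields returned by the underlying reader so that enumeration can be cut short; note the null check, since fields() may legitimately return null for a reader with no postings: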


    @Override
    public Fields fields() throws IOException {
      Fields fields = super.fields();
      if (fields == null) {
        return null;
      }
      return new ExitableFields(fields, _exitObject);
    }
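Here a Blur helper seeks straight to one family term with TermsEnum.seekExact and then walks the matching documents through a DocsEnum, restricted to a row's doc-id range and to live (non-deleted) documents: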


  private static void applyFamily(OpenBitSet bits, String family, SegmentReader segmentReader, int primeDocRowId,
      int numberOfDocsInRow, Bits liveDocs) throws IOException {
    Fields fields = segmentReader.fields();
    Terms terms = fields.terms(BlurConstants.FAMILY);
    // NOTE: terms is not null-checked here; this assumes every segment in
    // the table has at least one document with a family field.
    TermsEnum iterator = terms.iterator(null);
    BytesRef text = new BytesRef(family);
    int lastDocId = primeDocRowId + numberOfDocsInRow;
    if (iterator.seekExact(text, true)) {
      DocsEnum docs = iterator.docs(liveDocs, null, DocsEnum.FLAG_NONE);

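This Lucene test helper "duels" a MemoryIndex reader against a conventional reader: it iterates the competitor's field names and asserts, field by field, that both sides expose the same Terms, comparing norms for fields that carry them: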


  private void duellReaders(CompositeReader other, AtomicReader memIndexReader)
      throws IOException {
    AtomicReader competitor = new SlowCompositeReaderWrapper(other);
    Fields memFields = memIndexReader.fields();
    for (String field : competitor.fields()) {
      Terms memTerms = memFields.terms(field);
      Terms iwTerms = memIndexReader.terms(field);
      if (iwTerms == null) {
        assertNull(memTerms);
      } else {
        NumericDocValues normValues = competitor.getNormValues(field);
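A term vectors reader exposes per-document vectors through Fields get(int docID); this implementation materializes a TVFields view and returns null when the document stored no term vectors: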


  @Override
  public Fields get(int docID) throws IOException {
    if (tvx != null) {
      Fields fields = new TVFields(docID);
      if (fields.size() == 0) {
        // TODO: we can improve writer here, eg write 0 into
        // tvx file, so we know on first read from tvx that
        // this doc has no TVs
        return null;
      } else {

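When merging segments, CompressingTermVectorsWriter tries to copy whole compressed chunks from a reader that uses a matching format; if the formats differ, or a chunk contains deleted documents, it falls back to decoding each live document's vectors as a Fields and re-adding them: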

          || matchingVectorsReader.getCompressionMode() != compressionMode
          || matchingVectorsReader.getChunkSize() != chunkSize
          || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
        // naive merge...
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
          final Fields vectors = reader.getTermVectors(i);
          addAllDocVectors(vectors, mergeState);
          ++docCount;
          mergeState.checkAbort.work(300);
        }
      } else {
        final CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
        final IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
          if (pendingDocs.isEmpty()
              && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
            final long startPointer = index.getStartPointer(i);
            vectorsStream.seek(startPointer);
            final int docBase = vectorsStream.readVInt();
            final int chunkDocs = vectorsStream.readVInt();
            assert docBase + chunkDocs <= matchingSegmentReader.maxDoc();
            if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
                && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
              final long chunkEnd = index.getStartPointer(docBase + chunkDocs);
              final long chunkLength = chunkEnd - vectorsStream.getFilePointer();
              indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
              this.vectorsStream.writeVInt(docCount);
              this.vectorsStream.writeVInt(chunkDocs);
              this.vectorsStream.copyBytes(vectorsStream, chunkLength);
              docCount += chunkDocs;
              this.numDocs += chunkDocs;
              mergeState.checkAbort.work(300 * chunkDocs);
              i = nextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
            } else {
              for (; i < docBase + chunkDocs; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
                final Fields vectors = reader.getTermVectors(i);
                addAllDocVectors(vectors, mergeState);
                ++docCount;
                mergeState.checkAbort.work(300);
              }
            }
          } else {
            final Fields vectors = reader.getTermVectors(i);
            addAllDocVectors(vectors, mergeState);
            ++docCount;
            mergeState.checkAbort.work(300);
            i = nextLiveDoc(i + 1, liveDocs, maxDoc);
          }
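The default merge loop in the TermVectorsWriter base class copies document by document; the comment kept from the Lucene sources cites LUCENE-1282 for why vectors is assigned to a local variable before being passed to addAllDocVectors: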

          continue;
        }
       
        // NOTE: it's very important to first assign to vectors then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        Fields vectors = reader.getTermVectors(docNum);
        addAllDocVectors(vectors, mergeState);
        totalNumDocs++;
        mergeState.checkAbort.work(300);
      }
    }
