Package: org.apache.lucene.index

Examples of org.apache.lucene.index.Fields


    // Open a reader over the benchmark's index directory and verify the expected doc count.
    IndexReader reader = DirectoryReader.open(benchmark.getRunData().getDirectory());
    assertEquals(NUM_DOCS, reader.numDocs());

    int totalTokenCount2 = 0;

    // Merged Fields view across all of the reader's leaf segments.
    Fields fields = MultiFields.getFields(reader);

    for (String fieldName : fields) {
      // Skip the bookkeeping fields DocMaker adds; only content fields are tallied.
      if (fieldName.equals(DocMaker.ID_FIELD) || fieldName.equals(DocMaker.DATE_MSEC_FIELD) || fieldName.equals(DocMaker.TIME_SEC_FIELD)) {
        continue;
      }
      Terms terms = fields.terms(fieldName);
      if (terms == null) {
        continue; // field has no indexed terms
      }
      TermsEnum termsEnum = terms.iterator(null);
      DocsEnum docs = null;
View Full Code Here


    // The benchmark run should have configured a LogMergePolicy with mergeFactor=3
    // and compound files disabled (noCFSRatio == 0).
    assertEquals(3, ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor());
    assertEquals(0.0d, writer.getConfig().getMergePolicy().getNoCFSRatio(), 0.0);
    writer.close();
    Directory dir = benchmark.getRunData().getDirectory();
    IndexReader reader = DirectoryReader.open(dir);
    // Term vectors must have been stored for doc 0.
    Fields tfv = reader.getTermVectors(0);
    assertNotNull(tfv);
    assertTrue(tfv.size() > 0);
    reader.close();
  }
View Full Code Here

      in.close();
    }

    @Override
    public Fields get(int doc) throws IOException {
      Fields fields = in.get(doc);
      return fields == null ? null : new AssertingAtomicReader.AssertingFields(fields);
    }
View Full Code Here

        hit.setStoredFields(reader.document(fdoc.doc));
      }
      // Populate the hit's term-vector map for each requested field.
      if (termVectorsToFetch != null && termVectorsToFetch.size() > 0) {
        Map<String, List<BoboTerm>> tvMap = new HashMap<String, List<BoboTerm>>();
        hit.setTermVectorMap(tvMap);
        // NOTE(review): getTermVectors may return null when no vectors were stored for
        // this doc; fds.terms(...) below would then NPE — confirm callers guarantee vectors.
        Fields fds = reader.getTermVectors(fdoc.doc);
        for (String field : termVectorsToFetch) {
          Terms terms = fds.terms(field);
          if (terms == null) {
            continue; // no vector stored for this field
          }
          TermsEnum termsEnum = terms.iterator(null);
          BytesRef text;
View Full Code Here

  }

  // Rebuilds the per-field term counts and the index-wide total term count.
  // NOTE(review): MultiFields.getFields can return null for an empty index;
  // the for-each below would then NPE — confirm the reader is never empty here.
  private void countTerms() throws Exception {
    termCounts = new HashMap<String,FieldTermCount>();
    numTerms = 0;
    Fields fields = MultiFields.getFields(reader);   
    TermsEnum te = null;
    for (String fld : fields) {
      FieldTermCount ftc = new FieldTermCount();
      ftc.fieldname = fld;
      Terms terms = fields.terms(fld);
      if (terms != null) { // count terms
        te = terms.iterator(te); // reuse the enum across fields to avoid reallocation
        while (te.next() != null) {
          ftc.termCount++;
          numTerms++;
View Full Code Here

  // Collects the top-`numTerms` highest-frequency terms, either restricted to
  // `fieldNames` or across every field in the index.
  public static TermStats[] getHighFreqTerms(IndexReader reader, int numTerms, String[] fieldNames) throws Exception {
    TermStatsQueue tiq = null;
    TermsEnum te = null;
   
    if (fieldNames != null) {
      // Caller supplied an explicit field list: scan only those fields.
      Fields fields = MultiFields.getFields(reader);
      if (fields == null) {
        LOG.info("Index with no fields - probably empty or corrupted");
        return EMPTY_STATS;
      }
      tiq = new TermStatsQueue(numTerms);
      for (String field : fieldNames) {
        Terms terms = fields.terms(field);
        if (terms != null) {
          te = terms.iterator(te); // reuse the enum across fields
          fillQueue(te, tiq, field);
        }
      }
    } else {
      // No field list: walk every field in the index.
      Fields fields = MultiFields.getFields(reader);
      if (fields == null) {
        LOG.info("Index with no fields - probably empty or corrupted");
        return EMPTY_STATS;
      }
      tiq = new TermStatsQueue(numTerms);     
      while (true) {
        // BUG(review): fields.iterator() creates a FRESH iterator on every pass, so
        // next() always returns the FIRST field name — the loop never advances and
        // re-fills the queue with the same field forever. Hoist the iterator above
        // the loop (Iterator<String> it = fields.iterator()) and call it.next() here.
        // Also note java.util.Iterator.next() throws NoSuchElementException at the
        // end rather than returning null, so the null-check exit below looks
        // unreachable — verify against the Fields iterator contract in this version.
        String field = fields.iterator().next();
        if (field != null) {
          Terms terms = fields.terms(field);
          te = terms.iterator(te);
          fillQueue(te, tiq, field);
        } else {
          break;
        }
View Full Code Here

                          final TermCollector collector)
  throws IOException {
    final IndexReaderContext topReaderContext = reader.getContext();
    Comparator<BytesRef> lastTermComp = null;
    // Visit every leaf (segment) reader and collect terms for the query's field.
    for (final AtomicReaderContext context : topReaderContext.leaves()) {
      final Fields fields = context.reader().fields();
      if (fields == null) {
        // reader has no fields
        continue;
      }

      final Terms terms = fields.terms(query.field);
      if (terms == null) {
        // field does not exist
        continue;
      }
View Full Code Here

      Text key = new Text();
      int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
      int shardId = BlurUtil.getShardIndex(shard);
      for (AtomicReaderContext context : leaves) {
        AtomicReader atomicReader = context.reader();
        // NOTE(review): fields() may return null for a segment with no fields;
        // fields.terms(...) below would then NPE — confirm segments are never empty here.
        Fields fields = atomicReader.fields();
        Terms terms = fields.terms(BlurConstants.ROW_ID);
        if (terms != null) {
          TermsEnum termsEnum = terms.iterator(null);
          BytesRef ref = null;
          // Copy each row-id term's bytes into the reusable Text key.
          while ((ref = termsEnum.next()) != null) {
            key.set(ref.bytes, ref.offset, ref.length);
View Full Code Here

    String context = shardContext.getTableContext().getTable() + "/" + shardContext.getShard();
    Map<String, List<IndexTracerResult>> sampleIndex = indexWarmup.sampleIndex(reader, context);
    if (preCacheCols != null) {
      // Warm only the explicitly configured columns.
      warm(reader, preCacheCols, indexWarmup, sampleIndex, context, isClosed);
    } else {
      // No column list configured: warm every field in the reader.
      Fields fields = reader.fields();
      warm(reader, fields, indexWarmup, sampleIndex, context, isClosed);
    }
  }
View Full Code Here

  // Builds a TokenStream for highlighting; prefers the doc's stored term vector
  // for the field. (Fragment is truncated before the analyzer fallback path.)
  public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
      String field, Document doc, Analyzer analyzer) throws IOException {
    TokenStream ts = null;

    // Try to rebuild the stream from the stored term vector first.
    Fields vectors = reader.getTermVectors(docId);
    if (vectors != null) {
      Terms vector = vectors.terms(field);
      if (vector != null) {
        ts = getTokenStream(vector);
      }
    }
View Full Code Here

TOP

Related Classes of org.apache.lucene.index.Fields

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle, Inc. Contact: coftware#gmail.com.