Package org.apache.lucene.index

Examples of org.apache.lucene.index.AtomicReader
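
AtomicReader is the Lucene 4.x abstract class for a single-segment index reader (renamed LeafReader in Lucene 5.x); it gives direct access to a segment's term dictionary, postings, doc values, and live-document bits. The excerpts below, taken from several projects and from Lucene itself, show typical ways an AtomicReader is obtained and used.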


    if (startWith == null) {
      startWith = "";
    }
    Term term = getTerm(columnFamily, columnName, startWith);
    List<String> terms = new ArrayList<String>(size);
    AtomicReader areader = BlurUtil.getAtomicReader(reader);
    Terms termsAll = areader.terms(term.field());
    TermsEnum termEnum = termsAll.iterator(null);
    BytesRef currentTermText;
    while ((currentTermText = termEnum.next()) != null) {
      terms.add(currentTermText.utf8ToString());
      if (terms.size() >= size) {
View Full Code Here
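
The excerpt above (from Apache Blur) walks the term dictionary of one field, collecting term texts until a size limit is reached. A minimal, self-contained sketch of the same pattern is given below; the class and method names are made up for illustration, Blur's getTerm/BlurUtil helpers are not reproduced, and the seekCeil call is an assumed way to honor the startWith prefix. Note that AtomicReader.terms() returns null for a field with no indexed terms, so the sketch checks for that.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public class TermListing {
      /** Collects up to 'size' term texts of 'field', starting at the first term >= 'startWith'. */
      public static List<String> listTerms(AtomicReader reader, String field, String startWith, int size)
          throws IOException {
        List<String> result = new ArrayList<String>(size);
        Terms terms = reader.terms(field);            // null when the field has no indexed terms
        if (terms == null) {
          return result;
        }
        TermsEnum termsEnum = terms.iterator(null);   // the 4.x iterator takes a reuse argument
        if (startWith != null && startWith.length() > 0) {
          // position the enum on the first term at or after the prefix
          if (termsEnum.seekCeil(new BytesRef(startWith)) == TermsEnum.SeekStatus.END) {
            return result;
          }
          result.add(termsEnum.term().utf8ToString());
        }
        BytesRef term;
        while (result.size() < size && (term = termsEnum.next()) != null) {
          result.add(term.utf8ToString());
        }
        return result;
      }
    }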


          dims[dim] = new DrillSidewaysScorer.DocsEnumsAndFreq();
          dims[dim].sidewaysCollector = drillSidewaysCollectors[dim];
          String field = drillDownTerms[dim][0].field();
          dims[dim].dim = drillDownTerms[dim][0].text();
          if (lastField == null || !lastField.equals(field)) {
            AtomicReader reader = context.reader();
            Terms terms = reader.terms(field);
            if (terms != null) {
              termsEnum = terms.iterator(null);
            } else {
              termsEnum = null;
            }
View Full Code Here

    }
   
    DocIdSetIterator it = set.iterator();
    int doc = it.nextDoc();
    for (AtomicReaderContext context : reader.leaves()) {
      AtomicReader r = context.reader();
      final int maxDoc = r.maxDoc() + context.docBase;
      if (doc >= maxDoc) { // skip this segment
        continue;
      }
      if (!r.hasDeletions()) { // skip all docs that belong to this reader as it has no deletions
        while ((doc = it.nextDoc()) < maxDoc) {}
        continue;
      }
      Bits liveDocs = r.getLiveDocs();
      do {
        if (!liveDocs.get(doc - context.docBase)) {
          set.clear(doc);
        }
      } while ((doc = it.nextDoc()) < maxDoc);
View Full Code Here
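
The excerpt above removes deleted documents from a previously collected doc-id set by walking the set's iterator segment by segment. A hedged, standalone sketch of the same idea is shown below; it trades the iterator-skipping optimization for a simple per-segment loop, and the class and method names are invented.

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.FixedBitSet;

    public class LiveDocFilter {
      /** Clears every bit of 'set' (addressed by top-level docID) that refers to a deleted document. */
      public static void clearDeleted(IndexReader reader, FixedBitSet set) {
        for (AtomicReaderContext context : reader.leaves()) {
          AtomicReader r = context.reader();
          if (!r.hasDeletions()) {
            continue;                         // no deletions in this segment, nothing to clear
          }
          Bits liveDocs = r.getLiveDocs();    // non-null because the segment has deletions
          int docBase = context.docBase;
          for (int i = 0; i < r.maxDoc(); i++) {
            if (!liveDocs.get(i)) {
              set.clear(docBase + i);         // top-level docID = docBase + segment-local docID
            }
          }
        }
      }
    }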

      }
      bi.setText(content);
      int doc = docids[i];
      int leaf = ReaderUtil.subIndex(doc, leaves);
      AtomicReaderContext subContext = leaves.get(leaf);
      AtomicReader r = subContext.reader();
      Terms t = r.terms(field);
      if (t == null) {
        continue; // nothing to do
      }
      if (leaf != lastLeaf) {
        termsEnum = t.iterator(null);
View Full Code Here
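
The excerpt above (from a highlighter) first resolves a top-level docID to the leaf segment that holds it, then asks that segment's AtomicReader for the field's terms. A small sketch of that leaf lookup follows; the class and method names are invented, and the imports assume the 4.x package layout.

    import java.io.IOException;
    import java.util.List;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.ReaderUtil;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    public class LeafLookup {
      /** Returns a TermsEnum over 'field' in the segment holding the top-level 'docID', or null. */
      public static TermsEnum termsForDoc(IndexReader topReader, String field, int docID) throws IOException {
        List<AtomicReaderContext> leaves = topReader.leaves();
        int leaf = ReaderUtil.subIndex(docID, leaves);   // index of the segment containing docID
        AtomicReaderContext subContext = leaves.get(leaf);
        AtomicReader r = subContext.reader();
        Terms t = r.terms(field);
        if (t == null) {
          return null;                                   // the field is not indexed in this segment
        }
        return t.iterator(null);
      }
    }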

  public void testEmptyIndex() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(500));
    writer.close();
    IndexReader r = DirectoryReader.open(dir);
    AtomicReader reader = SlowCompositeReaderWrapper.wrap(r);
    FieldCache.DEFAULT.getTerms(reader, "foobar", true);
    FieldCache.DEFAULT.getTermsIndex(reader, "foobar");
    FieldCache.DEFAULT.purgeByCacheKey(reader.getCoreCacheKey());
    r.close();
    dir.close();
  }
View Full Code Here
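
testEmptyIndex above obtains an atomic view of a whole (empty) index with SlowCompositeReaderWrapper before exercising the FieldCache. The same wrapping step, outside the test framework, might look like the sketch below; the class and method names are invented.

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;

    public class CompositeToAtomic {
      /** Presents all segments of 'top' as a single AtomicReader view; 'top' stays owned by the caller. */
      public static AtomicReader atomicView(DirectoryReader top) throws IOException {
        // The wrapper emulates one big segment over the whole index; convenient, but slower than
        // working per leaf, so it is best reserved for APIs (like FieldCache above) that need a
        // single AtomicReader. The caller still closes the underlying DirectoryReader, as the
        // test above does with r.close().
        return SlowCompositeReaderWrapper.wrap(top);
      }
    }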

      doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value2")));
    }
    iw.addDocument(doc);
    DirectoryReader ir = iw.getReader();
    iw.close();
    AtomicReader ar = getOnlySegmentReader(ir);
   
    BytesRef scratch = new BytesRef();
   
    // Binary type: can be retrieved via getTerms()
    try {
View Full Code Here
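
The excerpt above indexes multi-valued SortedSetDocValuesField data and then reads it back through a per-segment AtomicReader. A hedged sketch of the read side follows, reusing the "sortedset" field name from the excerpt; the class and method names are invented. In the 4.x API the ordinals of a document are iterated with setDocument/nextOrd and resolved with lookupOrd.

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.SortedSetDocValues;
    import org.apache.lucene.util.BytesRef;

    public class SortedSetReadExample {
      /** Prints every "sortedset" value of the given segment-local document. */
      public static void printValues(AtomicReader ar, int docID) throws IOException {
        SortedSetDocValues dv = ar.getSortedSetDocValues("sortedset");
        if (dv == null) {
          return;                        // no doc values for this field in this segment
        }
        dv.setDocument(docID);           // position the cursor on this document
        BytesRef scratch = new BytesRef();
        long ord;
        while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
          dv.lookupOrd(ord, scratch);    // resolve the ordinal to its bytes
          System.out.println(scratch.utf8ToString());
        }
      }
    }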

    Document doc = new Document();
    iw.addDocument(doc);
    DirectoryReader ir = iw.getReader();
    iw.close();
   
    AtomicReader ar = getOnlySegmentReader(ir);
   
    final FieldCache cache = FieldCache.DEFAULT;
    cache.purgeAllCaches();
    assertEquals(0, cache.getCacheEntries().length);
   
View Full Code Here

    doc.add(new StoredField("bogusbits", "bogus"));
    iw.addDocument(doc);
    DirectoryReader ir = iw.getReader();
    iw.close();
   
    AtomicReader ar = getOnlySegmentReader(ir);
   
    final FieldCache cache = FieldCache.DEFAULT;
    cache.purgeAllCaches();
    assertEquals(0, cache.getCacheEntries().length);
   
View Full Code Here
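
The two excerpts above purge the FieldCache and assert that it is empty before exercising a reader. Using only the calls that appear in these excerpts, a basic populate-then-purge sequence could be sketched as below; the class and method names are invented, and the 3-argument getTerms signature is the one used in testEmptyIndex above.

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.search.FieldCache;

    public class FieldCacheHygiene {
      /** Populates the FieldCache for one field of one segment, then evicts that segment's entries. */
      public static void populateAndPurge(AtomicReader reader, String field) throws IOException {
        FieldCache cache = FieldCache.DEFAULT;
        cache.purgeAllCaches();                          // start from a known-empty cache
        cache.getTerms(reader, field, true);             // uninverts the field into the cache
        System.out.println("cache entries: " + cache.getCacheEntries().length);
        cache.purgeByCacheKey(reader.getCoreCacheKey()); // drop entries tied to this segment core
      }
    }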

   * <p>
   * The default implementation calls {@link #addSortedField}, passing
   * an Iterable that merges ordinals and values and filters deleted documents.
   */
  public void mergeSortedField(FieldInfo fieldInfo, final MergeState mergeState, List<SortedDocValues> toMerge) throws IOException {
    final AtomicReader readers[] = mergeState.readers.toArray(new AtomicReader[toMerge.size()]);
    final SortedDocValues dvs[] = toMerge.toArray(new SortedDocValues[toMerge.size()]);
   
    // step 1: iterate thru each sub and mark terms still in use
    TermsEnum liveTerms[] = new TermsEnum[dvs.length];
    for (int sub = 0; sub < liveTerms.length; sub++) {
      AtomicReader reader = readers[sub];
      SortedDocValues dv = dvs[sub];
      Bits liveDocs = reader.getLiveDocs();
      if (liveDocs == null) {
        liveTerms[sub] = dv.termsEnum();
      } else {
        OpenBitSet bitset = new OpenBitSet(dv.getValueCount());
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs.get(i)) {
            int ord = dv.getOrd(i);
            if (ord >= 0) {
              bitset.set(ord);
            }
View Full Code Here

   * <p>
   * The default implementation calls {@link #addSortedSetField}, passing
   * an Iterable that merges ordinals and values and filters deleted documents.
   */
  public void mergeSortedSetField(FieldInfo fieldInfo, final MergeState mergeState, List<SortedSetDocValues> toMerge) throws IOException {
    final AtomicReader readers[] = mergeState.readers.toArray(new AtomicReader[toMerge.size()]);
    final SortedSetDocValues dvs[] = toMerge.toArray(new SortedSetDocValues[toMerge.size()]);
   
    // step 1: iterate thru each sub and mark terms still in use
    TermsEnum liveTerms[] = new TermsEnum[dvs.length];
    for (int sub = 0; sub < liveTerms.length; sub++) {
      AtomicReader reader = readers[sub];
      SortedSetDocValues dv = dvs[sub];
      Bits liveDocs = reader.getLiveDocs();
      if (liveDocs == null) {
        liveTerms[sub] = dv.termsEnum();
      } else {
        OpenBitSet bitset = new OpenBitSet(dv.getValueCount());
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs.get(i)) {
            dv.setDocument(i);
            long ord;
            while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
              bitset.set(ord);
View Full Code Here
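
Both merge methods above begin by marking which ordinals are still referenced by live (non-deleted) documents, so that values belonging only to deleted documents are dropped from the merged segment. That first step can be written as a standalone helper; the sketch below mirrors the single-valued (SortedDocValues) case, with an invented class and method name.

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.OpenBitSet;

    public class LiveOrdinals {
      /** Returns a bitset marking every ordinal of 'dv' used by at least one live document. */
      public static OpenBitSet collectLiveOrds(AtomicReader reader, SortedDocValues dv) {
        OpenBitSet bitset = new OpenBitSet(dv.getValueCount());
        Bits liveDocs = reader.getLiveDocs();   // null means the segment has no deletions
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs == null || liveDocs.get(i)) {
            int ord = dv.getOrd(i);
            if (ord >= 0) {                     // -1 means the document has no value for the field
              bitset.set(ord);
            }
          }
        }
        return bitset;
      }
    }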
