Package org.apache.lucene.index

Examples of org.apache.lucene.index.DirectoryReader
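
DirectoryReader is the point-in-time IndexReader implementation used to read an index from a Directory. Before the excerpts below, here is a minimal sketch of the basic open/search/close cycle against the Lucene 4.x API (the class name, the "title" field, and the query term are illustrative, and dir is assumed to hold an existing index):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;

public class DirectoryReaderBasics {
  // Open a point-in-time view of the index, run one query, print the hits,
  // and close the reader (the reader, not the searcher, owns the resources).
  static void searchOnce(Directory dir) throws IOException {
    DirectoryReader reader = DirectoryReader.open(dir);
    try {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs hits = searcher.search(new TermQuery(new Term("title", "lucene")), 10);
      for (ScoreDoc sd : hits.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        System.out.println(sd.score + " " + doc.get("title"));
      }
    } finally {
      reader.close();
    }
  }
}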


 
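  // Excerpt from a CommonTermsQuery test: index random docs, pick the 5
  // highest- and 5 lowest-frequency terms of the "body" field with two bounded
  // priority queues, and check that CommonTermsQuery returns the same hits as
  // a reference BooleanQuery built from the low-frequency terms.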
  public void testRandomIndex() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    createRandomIndex(atLeast(50), w, random().nextLong());
    DirectoryReader reader = w.getReader();
    AtomicReader wrapper = SlowCompositeReaderWrapper.wrap(reader);
    String field = "body";
    Terms terms = wrapper.terms(field);
    PriorityQueue<TermAndFreq> lowFreqQueue = new PriorityQueue<CommonTermsQueryTest.TermAndFreq>(
        5) {
     
      @Override
      protected boolean lessThan(TermAndFreq a, TermAndFreq b) {
        return a.freq > b.freq;
      }
     
    };
    PriorityQueue<TermAndFreq> highFreqQueue = new PriorityQueue<CommonTermsQueryTest.TermAndFreq>(
        5) {
     
      @Override
      protected boolean lessThan(TermAndFreq a, TermAndFreq b) {
        return a.freq < b.freq;
      }
     
    };
    try {
      TermsEnum iterator = terms.iterator(null);
      while (iterator.next() != null) {
        if (highFreqQueue.size() < 5) {
          highFreqQueue.add(new TermAndFreq(
              BytesRef.deepCopyOf(iterator.term()), iterator.docFreq()));
          lowFreqQueue.add(new TermAndFreq(
              BytesRef.deepCopyOf(iterator.term()), iterator.docFreq()));
        } else {
          if (highFreqQueue.top().freq < iterator.docFreq()) {
            highFreqQueue.top().freq = iterator.docFreq();
            highFreqQueue.top().term = BytesRef.deepCopyOf(iterator.term());
            highFreqQueue.updateTop();
          }
         
          if (lowFreqQueue.top().freq > iterator.docFreq()) {
            lowFreqQueue.top().freq = iterator.docFreq();
            lowFreqQueue.top().term = BytesRef.deepCopyOf(iterator.term());
            lowFreqQueue.updateTop();
          }
        }
      }
      int lowFreq = lowFreqQueue.top().freq;
      int highFreq = highFreqQueue.top().freq;
      assumeTrue("unlucky index", highFreq - 1 > lowFreq);
      List<TermAndFreq> highTerms = queueToList(highFreqQueue);
      List<TermAndFreq> lowTerms = queueToList(lowFreqQueue);
     
      IndexSearcher searcher = newSearcher(reader);
      Occur lowFreqOccur = randomOccur(random());
      BooleanQuery verifyQuery = new BooleanQuery();
      CommonTermsQuery cq = new CommonTermsQuery(randomOccur(random()),
          lowFreqOccur, highFreq - 1, random().nextBoolean());
      for (TermAndFreq termAndFreq : lowTerms) {
        cq.add(new Term(field, termAndFreq.term));
        verifyQuery.add(new BooleanClause(new TermQuery(new Term(field,
            termAndFreq.term)), lowFreqOccur));
      }
      for (TermAndFreq termAndFreq : highTerms) {
        cq.add(new Term(field, termAndFreq.term));
      }
     
      TopDocs cqSearch = searcher.search(cq, reader.maxDoc());
     
      TopDocs verifySearch = searcher.search(verifyQuery, reader.maxDoc());
      assertEquals(verifySearch.totalHits, cqSearch.totalHits);
      Set<Integer> hits = new HashSet<Integer>();
      for (ScoreDoc doc : verifySearch.scoreDocs) {
        hits.add(doc.doc);
      }
     
      for (ScoreDoc doc : cqSearch.scoreDocs) {
        assertTrue(hits.remove(doc.doc));
      }
     
      assertTrue(hits.isEmpty());
     
      /*
       * Need to force merge here since QueryUtils adds checks based
       * on leaf readers, which have different statistics than the
       * top-level reader when there is more than one segment. That
       * could produce a different rewritten query and different results.
       */
      w.forceMerge(1);
      DirectoryReader reader2 = w.getReader();
      QueryUtils.check(random(), cq, newSearcher(reader2));
      reader2.close();
    } finally {
      reader.close();
      wrapper.close();
      w.close();
      dir.close();
    }
  }


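    // Excerpt: replace previously indexed document blocks in place, then take
    // a near-real-time reader from the writer before closing it.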
    for(List<Document> docs : updateDocs) {
      // Just replaces docs w/ same docs:
      w.updateDocuments(new Term("group", docs.get(0).get("group")), docs);
    }

    final DirectoryReader r = w.getReader();
    w.close();

    return r;
  }
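
The excerpt above hands out a near-real-time reader from the writer. When the index changes later, a full reopen is not required; a minimal sketch of the standard refresh idiom (Lucene 4.x API; the class and method names are illustrative):

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;

public class ReaderRefresh {
  // Returns the freshest reader; the old one is closed only if a new
  // point-in-time view was actually opened.
  static DirectoryReader refresh(DirectoryReader reader) throws IOException {
    DirectoryReader fresh = DirectoryReader.openIfChanged(reader);
    if (fresh == null) {
      return reader;  // nothing changed; keep the current view
    }
    reader.close();   // release the stale view
    return fresh;     // reuses segment readers that did not change
  }
}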

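      // Long excerpt from a randomized grouping test: the same documents are
      // searched in a regular index and in a block-grouped index, with random
      // group/doc sorts, offsets, caching, and sharding; results from the
      // first/second-pass collectors, the shards, and the block collector are
      // all cross-checked against a slow reference implementation.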
      }

      final GroupDoc[] groupDocsByID = new GroupDoc[groupDocs.length];
      System.arraycopy(groupDocs, 0, groupDocsByID, 0, groupDocs.length);

      final DirectoryReader r = w.getReader();
      w.close();

      // NOTE: intentional but temporary field cache insanity!
      final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
      DirectoryReader rBlocks = null;
      Directory dirBlocks = null;

      try {
        final IndexSearcher s = newSearcher(r);
        if (VERBOSE) {
          System.out.println("\nTEST: searcher=" + s);
        }

        if (SlowCompositeReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
          canUseIDV = false;
        } else {
          canUseIDV = !preFlex;
        }
        final ShardState shards = new ShardState(s);

        for(int contentID=0;contentID<3;contentID++) {
          final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
          for(ScoreDoc hit : hits) {
            final GroupDoc gd = groupDocs[docIDToID.get(hit.doc)];
            assertTrue(gd.score == 0.0);
            gd.score = hit.score;
            assertEquals(gd.id, docIDToID.get(hit.doc));
          }
        }

        for(GroupDoc gd : groupDocs) {
          assertTrue(gd.score != 0.0);
        }

        // Build 2nd index, where docs are added in blocks by
        // group, so we can use single pass collector
        dirBlocks = newDirectory();
        rBlocks = getDocBlockReader(dirBlocks, groupDocs);
        final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
        final FieldCache.Ints docIDToIDBlocks = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(rBlocks), "id", false);

        final IndexSearcher sBlocks = newSearcher(rBlocks);
        final ShardState shardsBlocks = new ShardState(sBlocks);

        // ReaderBlocks only increases maxDoc() vs reader, which
        // means a monotonic shift in scores, so we can
        // reliably remap them w/ Map:
        final Map<String,Map<Float,Float>> scoreMap = new HashMap<String,Map<Float,Float>>();

        // Tricky: must separately set .score2, because the doc
        // block index was created with possible deletions!
        //System.out.println("fixup score2");
        for(int contentID=0;contentID<3;contentID++) {
          //System.out.println("  term=real" + contentID);
          final Map<Float,Float> termScoreMap = new HashMap<Float,Float>();
          scoreMap.put("real"+contentID, termScoreMap);
          //System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) +
          //" dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID)));
          final ScoreDoc[] hits = sBlocks.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
          for(ScoreDoc hit : hits) {
            final GroupDoc gd = groupDocsByID[docIDToIDBlocks.get(hit.doc)];
            assertTrue(gd.score2 == 0.0);
            gd.score2 = hit.score;
            assertEquals(gd.id, docIDToIDBlocks.get(hit.doc));
            //System.out.println("    score=" + gd.score + " score2=" + hit.score + " id=" + docIDToIDBlocks.get(hit.doc));
            termScoreMap.put(gd.score, gd.score2);
          }
        }

        for(int searchIter=0;searchIter<100;searchIter++) {

          if (VERBOSE) {
            System.out.println("\nTEST: searchIter=" + searchIter);
          }

          final String searchTerm = "real" + random().nextInt(3);
          final boolean fillFields = random().nextBoolean();
          boolean getScores = random().nextBoolean();
          final boolean getMaxScores = random().nextBoolean();
          final Sort groupSort = getRandomSort();
          //final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)});
          // TODO: also test null (= sort by relevance)
          final Sort docSort = getRandomSort();

          for(SortField sf : docSort.getSort()) {
            if (sf.getType() == SortField.Type.SCORE) {
              getScores = true;
              break;
            }
          }

          for(SortField sf : groupSort.getSort()) {
            if (sf.getType() == SortField.Type.SCORE) {
              getScores = true;
              break;
            }
          }

          final int topNGroups = _TestUtil.nextInt(random(), 1, 30);
          //final int topNGroups = 10;
          final int docsPerGroup = _TestUtil.nextInt(random(), 1, 50);

          final int groupOffset = _TestUtil.nextInt(random(), 0, (topNGroups-1)/2);
          //final int groupOffset = 0;

          final int docOffset = _TestUtil.nextInt(random(), 0, docsPerGroup-1);
          //final int docOffset = 0;

          final boolean doCache = random().nextBoolean();
          final boolean doAllGroups = random().nextBoolean();
          if (VERBOSE) {
            System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq(new Term("content", searchTerm))  +" dFBlock=" + rBlocks.docFreq(new Term("content", searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
          }

          String groupField = "group";
          if (canUseIDV && random().nextBoolean()) {
            groupField += "_dv";
          }
          if (VERBOSE) {
            System.out.println("  groupField=" + groupField);
          }
          final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups);
          final CachingCollector cCache;
          final Collector c;

          final AbstractAllGroupsCollector<?> allGroupsCollector;
          if (doAllGroups) {
            allGroupsCollector = createAllGroupsCollector(c1, groupField);
          } else {
            allGroupsCollector = null;
          }

          final boolean useWrappingCollector = random().nextBoolean();

          if (doCache) {
            final double maxCacheMB = random().nextDouble();
            if (VERBOSE) {
              System.out.println("TEST: maxCacheMB=" + maxCacheMB);
            }

            if (useWrappingCollector) {
              if (doAllGroups) {
                cCache = CachingCollector.create(c1, true, maxCacheMB);
                c = MultiCollector.wrap(cCache, allGroupsCollector);
              } else {
                c = cCache = CachingCollector.create(c1, true, maxCacheMB);
              }
            } else {
              // Collect only into cache, then replay multiple times:
              c = cCache = CachingCollector.create(false, true, maxCacheMB);
            }
          } else {
            cCache = null;
            if (doAllGroups) {
              c = MultiCollector.wrap(c1, allGroupsCollector);
            } else {
              c = c1;
            }
          }

          // Search top reader:
          final Query query = new TermQuery(new Term("content", searchTerm));

          s.search(query, c);

          if (doCache && !useWrappingCollector) {
            if (cCache.isCached()) {
              // Replay for first-pass grouping
              cCache.replay(c1);
              if (doAllGroups) {
                // Replay for all groups:
                cCache.replay(allGroupsCollector);
              }
            } else {
              // Replay by re-running search:
              s.search(query, c1);
              if (doAllGroups) {
                s.search(query, allGroupsCollector);
              }
            }
          }

          // Get 1st pass top groups
          final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(c1, groupOffset, fillFields);
          final TopGroups<BytesRef> groupsResult;
          if (VERBOSE) {
            System.out.println("TEST: first pass topGroups");
            if (topGroups == null) {
              System.out.println("  null");
            } else {
              for (SearchGroup<BytesRef> searchGroup : topGroups) {
                System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue) + ": " + Arrays.deepToString(searchGroup.sortValues));
              }
            }
          }

          // Get 1st pass top groups using shards

          ValueHolder<Boolean> idvBasedImplsUsedSharded = new ValueHolder<Boolean>(false);
          final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
              groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, canUseIDV, preFlex, idvBasedImplsUsedSharded);
          final AbstractSecondPassGroupingCollector<?> c2;
          if (topGroups != null) {

            if (VERBOSE) {
              System.out.println("TEST: topGroups");
              for (SearchGroup<BytesRef> searchGroup : topGroups) {
                System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue.utf8ToString()) + ": " + Arrays.deepToString(searchGroup.sortValues));
              }
            }

            c2 = createSecondPassCollector(c1, groupField, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getScores, getMaxScores, fillFields);
            if (doCache) {
              if (cCache.isCached()) {
                if (VERBOSE) {
                  System.out.println("TEST: cache is intact");
                }
                cCache.replay(c2);
              } else {
                if (VERBOSE) {
                  System.out.println("TEST: cache was too large");
                }
                s.search(query, c2);
              }
            } else {
              s.search(query, c2);
            }

            if (doAllGroups) {
              TopGroups<BytesRef> tempTopGroups = getTopGroups(c2, docOffset);
              groupsResult = new TopGroups<BytesRef>(tempTopGroups, allGroupsCollector.getGroupCount());
            } else {
              groupsResult = getTopGroups(c2, docOffset);
            }
          } else {
            c2 = null;
            groupsResult = null;
            if (VERBOSE) {
              System.out.println("TEST:   no results");
            }
          }

          final TopGroups<BytesRef> expectedGroups = slowGrouping(groupDocs, searchTerm, fillFields, getScores, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset);

          if (VERBOSE) {
            if (expectedGroups == null) {
              System.out.println("TEST: no expected groups");
            } else {
              System.out.println("TEST: expected groups totalGroupedHitCount=" + expectedGroups.totalGroupedHitCount);
              for(GroupDocs<BytesRef> gd : expectedGroups.groups) {
                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits + " scoreDocs.len=" + gd.scoreDocs.length);
                for(ScoreDoc sd : gd.scoreDocs) {
                  System.out.println("    id=" + sd.doc + " score=" + sd.score);
                }
              }
            }

            if (groupsResult == null) {
              System.out.println("TEST: no matched groups");
            } else {
              System.out.println("TEST: matched groups totalGroupedHitCount=" + groupsResult.totalGroupedHitCount);
              for(GroupDocs<BytesRef> gd : groupsResult.groups) {
                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
                for(ScoreDoc sd : gd.scoreDocs) {
                  System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
                }
              }

              if (searchIter == 14) {
                for(int docIDX=0;docIDX<s.getIndexReader().maxDoc();docIDX++) {
                  System.out.println("ID=" + docIDToID.get(docIDX) + " explain=" + s.explain(query, docIDX));
                }
              }
            }

            if (topGroupsShards == null) {
              System.out.println("TEST: no matched-merged groups");
            } else {
              System.out.println("TEST: matched-merged groups totalGroupedHitCount=" + topGroupsShards.totalGroupedHitCount);
              for(GroupDocs<BytesRef> gd : topGroupsShards.groups) {
                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
                for(ScoreDoc sd : gd.scoreDocs) {
                  System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
                }
              }
            }
          }

          assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores, groupField.endsWith("_dv"));

          // Confirm merged shards match:
          assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores, idvBasedImplsUsedSharded.value);
          if (topGroupsShards != null) {
            verifyShards(shards.docStarts, topGroupsShards);
          }

          final boolean needsScores = getScores || getMaxScores || docSort == null;
          final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
          final TermAllGroupsCollector allGroupsCollector2;
          final Collector c4;
          if (doAllGroups) {
            // NOTE: must be "group" and not "group_dv"
            // (groupField) because we didn't index doc
            // values in the block index:
            allGroupsCollector2 = new TermAllGroupsCollector("group");
            c4 = MultiCollector.wrap(c3, allGroupsCollector2);
          } else {
            allGroupsCollector2 = null;
            c4 = c3;
          }
          // Get block grouping result:
          sBlocks.search(query, c4);
          @SuppressWarnings({"unchecked","rawtypes"})
          final TopGroups<BytesRef> tempTopGroupsBlocks = (TopGroups<BytesRef>) c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
          final TopGroups<BytesRef> groupsResultBlocks;
          if (doAllGroups && tempTopGroupsBlocks != null) {
            assertEquals((int) tempTopGroupsBlocks.totalGroupCount, allGroupsCollector2.getGroupCount());
            groupsResultBlocks = new TopGroups<BytesRef>(tempTopGroupsBlocks, allGroupsCollector2.getGroupCount());
          } else {
            groupsResultBlocks = tempTopGroupsBlocks;
          }

          if (VERBOSE) {
            if (groupsResultBlocks == null) {
              System.out.println("TEST: no block groups");
            } else {
              System.out.println("TEST: block groups totalGroupedHitCount=" + groupsResultBlocks.totalGroupedHitCount);
              boolean first = true;
              for(GroupDocs<BytesRef> gd : groupsResultBlocks.groups) {
                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()) + " totalHits=" + gd.totalHits);
                for(ScoreDoc sd : gd.scoreDocs) {
                  System.out.println("    id=" + docIDToIDBlocks.get(sd.doc) + " score=" + sd.score);
                  if (first) {
                    System.out.println("explain: " + sBlocks.explain(query, sd.doc));
                    first = false;
                  }
                }
              }
            }
          }

          // Get sharded block grouping result:
          // Block index does not index DocValues so we pass
          // false for canUseIDV:
          final TopGroups<BytesRef> topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query,
              groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, false, false, new ValueHolder<Boolean>(false));

          if (expectedGroups != null) {
            // Fixup scores for reader2
            for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
              for(ScoreDoc hit : groupDocsHits.scoreDocs) {
                final GroupDoc gd = groupDocsByID[hit.doc];
                assertEquals(gd.id, hit.doc);
                //System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score);
                hit.score = gd.score2;
              }
            }

            final SortField[] sortFields = groupSort.getSort();
            final Map<Float,Float> termScoreMap = scoreMap.get(searchTerm);
            for(int groupSortIDX=0;groupSortIDX<sortFields.length;groupSortIDX++) {
              if (sortFields[groupSortIDX].getType() == SortField.Type.SCORE) {
                for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
                  if (groupDocsHits.groupSortValues != null) {
                    //System.out.println("remap " + groupDocsHits.groupSortValues[groupSortIDX] + " to " + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]));
                    groupDocsHits.groupSortValues[groupSortIDX] = termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]);
                    assertNotNull(groupDocsHits.groupSortValues[groupSortIDX]);
                  }
                }
              }
            }

            final SortField[] docSortFields = docSort.getSort();
            for(int docSortIDX=0;docSortIDX<docSortFields.length;docSortIDX++) {
              if (docSortFields[docSortIDX].getType() == SortField.Type.SCORE) {
                for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
                  for(ScoreDoc _hit : groupDocsHits.scoreDocs) {
                    FieldDoc hit = (FieldDoc) _hit;
                    if (hit.fields != null) {
                      hit.fields[docSortIDX] = termScoreMap.get(hit.fields[docSortIDX]);
                      assertNotNull(hit.fields[docSortIDX]);
                    }
                  }
                }
              }
            }
          }

          assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, true, getScores, false);
          assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
        }
      } finally {
        QueryUtils.purgeFieldCache(r);
        if (rBlocks != null) {
          QueryUtils.purgeFieldCache(rBlocks);
        }
      }

      r.close();
      dir.close();

      rBlocks.close();
      dirBlocks.close();
    }
  }
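
Several excerpts above wrap a multi-segment DirectoryReader in SlowCompositeReaderWrapper to get a single AtomicReader view. A minimal sketch of that pattern (Lucene 4.x; class and method names are illustrative; the on-the-fly merge is convenient in tests but slow on large indexes):

import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;

public class CompositeToAtomic {
  // Counts the unique terms of one field across all segments at once.
  static long countTerms(Directory dir, String field) throws IOException {
    DirectoryReader reader = DirectoryReader.open(dir);
    try {
      AtomicReader atomic = SlowCompositeReaderWrapper.wrap(reader);
      Terms terms = atomic.terms(field);
      if (terms == null) {
        return 0; // field was never indexed
      }
      long count = 0;
      TermsEnum te = terms.iterator(null);
      while (te.next() != null) {
        count++;
      }
      return count;
    } finally {
      reader.close(); // closing the underlying reader is sufficient
    }
  }
}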

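    // Excerpt from a benchmark "logic" test: run the algorithm, open a
    // DirectoryReader over the resulting index, and assert via the FieldCache
    // terms index that all 1000 docs carry a "country" value.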
    CountingSearchTestTask.numSearches = 0;
   
    // 3. execute the algorithm  (required in every "logic" test)
    Benchmark benchmark = execBenchmark(algLines);

    DirectoryReader r = DirectoryReader.open(benchmark.getRunData().getDirectory());
    SortedDocValues idx = FieldCache.DEFAULT.getTermsIndex(SlowCompositeReaderWrapper.wrap(r), "country");
    final int maxDoc = r.maxDoc();
    assertEquals(1000, maxDoc);
    for(int i=0;i<1000;i++) {
      assertTrue("doc " + i + " has null country", idx.getOrd(i) != -1);
    }
    r.close();
  }

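          // Excerpt from a directory consistency check: no unreferenced files
          // may remain after deletion, and opening/closing an IndexWriter must
          // not change the doc count visible to a DirectoryReader.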
            }
            
            assert false : "unreferenced files: before delete:\n    " + Arrays.toString(startFiles) + "\n  after delete:\n    " + Arrays.toString(endFiles) + extras;
          }

          DirectoryReader ir1 = DirectoryReader.open(this);
          int numDocs1 = ir1.numDocs();
          ir1.close();
          new IndexWriter(this, new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, null)).close();
          DirectoryReader ir2 = DirectoryReader.open(this);
          int numDocs2 = ir2.numDocs();
          ir2.close();
          assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2;
        }
      }
    }
    in.close();

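    // Excerpt: documents only become visible after a commit and a newly
    // opened DirectoryReader; the first reader keeps its point-in-time view
    // and must be closed and reopened to see the second document.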
    doc.add(new TextField("contents", dummyContent, Field.Store.YES));
    writer.addDocument(doc, analyzer);
    writer.commit();

    // try the search over the first doc
    DirectoryReader directoryReader = DirectoryReader.open(dir);
    IndexSearcher indexSearcher = newSearcher(directoryReader);
    TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
    assertTrue(result.totalHits > 0);
    Document d = indexSearcher.doc(result.scoreDocs[0].doc);
    assertNotNull(d);
    assertNotNull(d.getField("title"));
    assertEquals(dummyTitle, d.getField("title").stringValue());
    assertNotNull(d.getField("contents"));
    assertEquals(dummyContent, d.getField("contents").stringValue());

    // add a second doc
    doc = new Document();
    String dogmasTitle = "dogmas";
    doc.add(new TextField("title", dogmasTitle, Field.Store.YES));
    String dogmasContents = "white men can't jump";
    doc.add(new TextField("contents", dogmasContents, Field.Store.YES));
    writer.addDocument(doc, analyzer);
    writer.commit();

    directoryReader.close();
    directoryReader = DirectoryReader.open(dir);
    indexSearcher = newSearcher(directoryReader);
    result = indexSearcher.search(new MatchAllDocsQuery(), 2);
    Document d1 = indexSearcher.doc(result.scoreDocs[1].doc);
    assertNotNull(d1);
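
Note the close-then-reopen pattern at the end of this excerpt: the first DirectoryReader can never see the second commit. DirectoryReader.openIfChanged, sketched earlier, achieves the same refresh while reusing any segments that did not change.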

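    // Excerpt: force-merge down to one segment so the DirectoryReader holds a
    // single SegmentReader, which is then wrapped to simulate an index whose
    // documents are all deleted.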
      w.addDocument(new Document());
    }
    w.forceMerge(1);
    w.commit();
    w.close();
    DirectoryReader reader = DirectoryReader.open(d);
    return new AllDeletedFilterReader(LuceneTestCase.getOnlySegmentReader(reader));
  }

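    // Excerpt: take a near-real-time reader from the writer, close the writer,
    // and bundle the reader with the expected facet/group data into an
    // IndexContext for later assertions.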
      } else {
        writer.addDocument(doc);
      }
    }

    DirectoryReader reader = writer.getReader();
    writer.close();

    return new IndexContext(searchTermToFacetToGroups, reader, numDocs, dir, facetWithMostGroups, numGroups, contentBrs, uniqueFacetValues, useDv);
  }

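    // Excerpt (Bobo-Browse): wrap a freshly opened DirectoryReader in a
    // BoboMultiReader, load facet data for every sub-reader, and browse.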
    writer.commit();

    attributesFacetHandler = new AttributesFacetHandler(AttributeHandlerName, AttributeHandlerName,
        null, null, new HashMap<String, String>());
    facetHandlers.add(attributesFacetHandler);
    DirectoryReader reader = DirectoryReader.open(directory);
    boboReader = BoboMultiReader.getInstance(reader, facetHandlers);
    for (BoboSegmentReader subReader : boboReader.getSubReaders()) {
      attributesFacetHandler.loadFacetData(subReader);
    }
    browser = new BoboBrowser(boboReader);

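    // Excerpt (Bobo-Browse): same setup as above, except facetProps caps each
    // attribute key at one facet value via MAX_FACETS_PER_KEY_PROP_NAME.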
    HashMap<String, String> facetProps = new HashMap<String, String>();
    facetProps.put(AttributesFacetHandler.MAX_FACETS_PER_KEY_PROP_NAME, "1");
    attributesFacetHandler = new AttributesFacetHandler(AttributeHandlerName, AttributeHandlerName,
        null, null, facetProps);
    facetHandlers.add(attributesFacetHandler);
    DirectoryReader reader = DirectoryReader.open(directory);
    boboReader = BoboMultiReader.getInstance(reader, facetHandlers);
    for (BoboSegmentReader subReader : boboReader.getSubReaders()) {
      attributesFacetHandler.loadFacetData(subReader);
    }
    browser = new BoboBrowser(boboReader);
