/**
 * Verifies that a MultiSearcher behaves correctly when one of its sub-indices
 * is (or was) empty.
 * <p>
 * Scenario 1: index B is completely empty — the search must still find the
 * three documents stored in index A.
 * Scenario 2: one document is added to B — the MultiSearcher must see four
 * hits, and {@code subSearcher()} must map global doc ids to the correct
 * sub-searcher index.
 * Scenario 3: that document is deleted again and B is optimized back to an
 * empty state — the search must once more return only the three documents
 * from A, and fetching the stored documents must not throw.
 *
 * @throws Exception if any index or search operation fails (test failure)
 */
public void testEmptyIndex()
throws Exception
{
// two directories: A will be populated, B starts out (and stays mostly) empty
Directory indexStoreA = new MockRAMDirectory();
Directory indexStoreB = new MockRAMDirectory();
// a single analyzer instance shared by every writer and the query parser,
// instead of constructing an identical StandardAnalyzer five times
StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
// the three documents that go into index A; all share handle "1"
Document lDoc = createTestDocument("Once upon a time.....", "doc1");
Document lDoc2 = createTestDocument("in a galaxy far far away.....", "doc2");
Document lDoc3 = createTestDocument("a bizarre bug manifested itself....", "doc3");
// creating an index writer for the first index
IndexWriter writerA = new IndexWriter(indexStoreA, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
// creating an index writer for the second index, but writing nothing
IndexWriter writerB = new IndexWriter(indexStoreB, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
//--------------------------------------------------------------------
// scenario 1: the second index is empty
//--------------------------------------------------------------------
// writing the documents to the first index
writerA.addDocument(lDoc);
writerA.addDocument(lDoc2);
writerA.addDocument(lDoc3);
writerA.optimize();
writerA.close();
// closing the second (still empty) index
writerB.close();
// query that matches all three documents via their shared handle field
QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", analyzer);
Query query = parser.parse("handle:1");
// building the searchables
Searcher[] searchers = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the
// searcher for the populated index — this ordering is what the test exercises
searchers[0] = new IndexSearcher(indexStoreB, true);
searchers[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher = getMultiSearcherInstance(searchers);
// performing the search; all three documents from index A must be found
ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
assertEquals(3, hits.length);
// retrieving each stored document must not throw
for (int i = 0; i < hits.length; i++) {
mSearcher.doc(hits[i].doc);
}
mSearcher.close();
//--------------------------------------------------------------------
// scenario 2: one document is added to the previously empty index
//--------------------------------------------------------------------
writerB = new IndexWriter(indexStoreB, analyzer, false, IndexWriter.MaxFieldLength.LIMITED);
writerB.addDocument(lDoc);
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers2 = new Searcher[2];
// VITAL STEP: again the (formerly) empty index must come first
searchers2[0] = new IndexSearcher(indexStoreB, true);
searchers2[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
// the same query now also matches the copy of doc1 in index B
ScoreDoc[] hits2 = mSearcher2.search(query, null, 1000).scoreDocs;
assertEquals(4, hits2.length);
// iterating over the hit documents
for (int i = 0; i < hits2.length; i++) {
// no exception should happen at this point
mSearcher2.doc(hits2[i].doc);
}
// test the subSearcher() method: doc1 now exists in both sub-indices,
// doc2 only in the second one
Query subSearcherQuery = parser.parse("id:doc1");
hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
assertEquals(2, hits2.length);
assertEquals(0, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[0]
assertEquals(1, mSearcher2.subSearcher(hits2[1].doc)); // hit from searchers2[1]
subSearcherQuery = parser.parse("id:doc2");
hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
assertEquals(1, hits2.length);
assertEquals(1, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[1]
mSearcher2.close();
//--------------------------------------------------------------------
// scenario 3: the document just added is deleted again
//--------------------------------------------------------------------
// deleting the document just added, this will cause a different exception to take place
Term term = new Term("id", "doc1");
IndexReader readerB = IndexReader.open(indexStoreB, false);
readerB.deleteDocuments(term);
readerB.close();
// optimizing the index with the writer compacts away the deleted document
writerB = new IndexWriter(indexStoreB, analyzer, false, IndexWriter.MaxFieldLength.LIMITED);
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers3 = new Searcher[2];
searchers3[0] = new IndexSearcher(indexStoreB, true);
searchers3[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
// back to the original three hits from index A only
ScoreDoc[] hits3 = mSearcher3.search(query, null, 1000).scoreDocs;
assertEquals(3, hits3.length);
// retrieving each stored document must not throw
for (int i = 0; i < hits3.length; i++) {
mSearcher3.doc(hits3[i].doc);
}
mSearcher3.close();
indexStoreA.close();
indexStoreB.close();
}

/**
 * Builds a stored test document with the given fulltext body and id.
 * All test documents share handle "1" so a single query matches them all.
 */
private static Document createTestDocument(String fulltext, String id) {
Document doc = new Document();
doc.add(new Field("fulltext", fulltext, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
return doc;
}