Examples of RandomIndexWriter
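
RandomIndexWriter is the randomizing writer wrapper from Lucene's test framework: it exposes the familiar IndexWriter API, but randomly varies flush points, merge behavior, and the kind of reader handed back by getReader(), so a single test run exercises many index code paths. As a baseline for the snippets collected below, here is a minimal sketch of the lifecycle they all share. It assumes a Lucene 3.x LuceneTestCase subclass (newDirectory, newField, newSearcher, and the shared random field are inherited helpers); the class and field names are illustrative only.

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.LuceneTestCase;

    public class TestRandomIndexWriterBasics extends LuceneTestCase {
      public void testBasics() throws Exception {
        Directory dir = newDirectory();
        // wraps a real IndexWriter; flushes, merges, and reader flavors
        // are randomized per run to widen coverage
        RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random));

        Document doc = new Document();
        doc.add(newField("field", "hello world", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);

        // getReader() sees everything added so far; on a coin flip it
        // may be an NRT reader or a freshly opened one
        IndexReader reader = writer.getReader();
        writer.close();

        IndexSearcher searcher = newSearcher(reader);
        assertEquals(1, searcher.search(new TermQuery(new Term("field", "hello")), 10).totalHits);

        // close in reverse order of acquisition
        searcher.close();
        reader.close();
        dir.close();
      }
    }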


Examples of org.apache.lucene.index.RandomIndexWriter

    this.addDocument("<c123456>");
    this.addDocument("<d123456>");
    this.addDocument("<e123456>");

    final Directory directory2 = newDirectory();
    final RandomIndexWriter writer2 = newRandomIndexWriter(directory2, analyzer, codec);
    addDocument(writer2, "<a123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<c123456>");
    addDocument(writer2, "<f123456>");

    final IndexReader ir1 = writer.getReader();
    final IndexReader ir2 = writer2.getReader();

    final MultiReader mr = new MultiReader(ir1, ir2);
    final IndexSearcher searcher = newSearcher(mr);
    final FuzzyQuery fq = new FuzzyQuery(new Term(DEFAULT_TEST_FIELD, "z123456"), 1, 0, 2, false);
    final TopDocs docs = searcher.search(fq, 2);
    assertEquals(5, docs.totalHits); // 5 docs, from the a and b's

    mr.close();
    ir2.close();
    writer2.close();
    directory2.close();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

  }

  // LUCENE-1630
  public void testNullOrSubScorer() throws Throwable {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    Document doc = new Document();
    doc.add(newField("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
    w.addDocument(doc);

    IndexReader r = w.getReader();
    IndexSearcher s = newSearcher(r);
    BooleanQuery q = new BooleanQuery();
    q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);

    // LUCENE-2617: make sure that a term not in the index still contributes to the score via coord factor
    float score = s.search(q, 10).getMaxScore();
    Query subQuery = new TermQuery(new Term("field", "not_in_index"));
    subQuery.setBoost(0);
    q.add(subQuery, BooleanClause.Occur.SHOULD);
    float score2 = s.search(q, 10).getMaxScore();
    assertEquals(score*.5, score2, 1e-6); // coord is now 1/2: one of two clauses matches

    // now test BooleanScorer2
    subQuery = new TermQuery(new Term("field", "b"));
    subQuery.setBoost(0);
    q.add(subQuery, BooleanClause.Occur.MUST);
    score2 = s.search(q, 10).getMaxScore();
    assertEquals(score*(2.0/3), score2, 1e-6); // coord 2/3: the boost-0 MUST clause matches but scores 0
    // PhraseQuery w/ no terms added returns a null scorer
    PhraseQuery pq = new PhraseQuery();
    q.add(pq, BooleanClause.Occur.SHOULD);
    assertEquals(1, s.search(q, 10).totalHits);

    // A required clause which returns null scorer should return null scorer to
    // IndexSearcher.
    q = new BooleanQuery();
    pq = new PhraseQuery();
    q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
    q.add(pq, BooleanClause.Occur.MUST);
    assertEquals(0, s.search(q, 10).totalHits);

    DisjunctionMaxQuery dmq = new DisjunctionMaxQuery(1.0f);
    dmq.add(new TermQuery(new Term("field", "a")));
    dmq.add(pq);
    assertEquals(1, s.search(dmq, 10).totalHits);
   
    s.close();
    r.close();
    w.close();
    dir.close();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

    dir.close();
  }
 
  public void testDeMorgan() throws Exception {
    Directory dir1 = newDirectory();
    RandomIndexWriter iw1 = new RandomIndexWriter(random, dir1);
    Document doc1 = new Document();
    doc1.add(newField("field", "foo bar", Field.Index.ANALYZED));
    iw1.addDocument(doc1);
    IndexReader reader1 = iw1.getReader();
    iw1.close();
   
    Directory dir2 = newDirectory();
    RandomIndexWriter iw2 = new RandomIndexWriter(random, dir2);
    Document doc2 = new Document();
    doc2.add(newField("field", "foo baz", Field.Index.ANALYZED));
    iw2.addDocument(doc2);
    IndexReader reader2 = iw2.getReader();
    iw2.close();
   
    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random));
    qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
   
    MultiReader multireader = new MultiReader(reader1, reader2);

Examples of org.apache.lucene.index.RandomIndexWriter

  @Override
  public void setUp() throws Exception {
    super.setUp();
    directory = newDirectory();
   
    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
    for (int i = 0; i < values.length; i++) {
      Document doc = new Document();
      doc.add(newField(FIELD, values[i], Field.Store.YES,
          Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    writer.optimize();
    indexReader = writer.getReader();
    writer.close();
    indexSearcher = newSearcher(indexReader);
  }

Examples of org.apache.lucene.index.RandomIndexWriter

   */
  @Override
  public void setUp() throws Exception {
    super.setUp();
    index = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, index);
    RandomGen random = new RandomGen(this.random);
    for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if too low the
                                           // problem doesn't show up
      Document doc = new Document();
      if ((i % 5) != 0) { // some documents must not have an entry in the first
                          // sort field
        doc.add(newField("publicationDate_", random.getLuceneDate(),
            Field.Store.YES, Field.Index.NOT_ANALYZED));
      }
      if ((i % 7) == 0) { // some documents to match the query (see below)
        doc.add(newField("content", "test", Field.Store.YES,
            Field.Index.ANALYZED));
      }
      // every document has a defined 'mandant' field
      doc.add(newField("mandant", Integer.toString(i % 3), Field.Store.YES,
          Field.Index.NOT_ANALYZED));
      writer.addDocument(doc);
    }
    reader = writer.getReader();
    writer.close();
    query = new TermQuery(new Term("content", "test"));
  }

Examples of org.apache.lucene.index.RandomIndexWriter

public class TestCachingSpanFilter extends LuceneTestCase {

  public void testEnforceDeletions() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(
        random,
        dir,
        newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergeScheduler(new SerialMergeScheduler()).
            // asserts below require no unexpected merges:
            setMergePolicy(newLogMergePolicy(10))
    );

    // NOTE: cannot use writer.getReader because RIW (on
    // flipping a coin) may give us a newly opened reader,
    // but we call .reopen on this reader below and must
    // get back an NRT reader:
    IndexReader reader = IndexReader.open(writer.w, true);
    // same reason we don't wrap?
    IndexSearcher searcher = newSearcher(reader, false);

    // add a doc, refresh the reader, and check that it's there
    Document doc = new Document();
    doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);

    reader = refreshReader(reader);
    searcher.close();
    searcher = newSearcher(reader, false);

    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
    assertEquals("Should find a hit...", 1, docs.totalHits);

    final SpanFilter startFilter = new SpanQueryFilter(new SpanTermQuery(new Term("id", "1")));

    // ignore deletions
    CachingSpanFilter filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);
       
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
    ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // now delete the doc, refresh the reader, and see that
    // it's not there
    _TestUtil.keepFullyDeletedSegments(writer.w);
    writer.deleteDocuments(new Term("id", "1"));

    reader = refreshReader(reader);
    searcher.close();
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);


    // force cache to regenerate:
    filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher.close();
    searcher = newSearcher(reader, false);
       
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);

    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry!  Else the assert below may sometimes
    // fail:
    IndexReader oldReader = reader;

    // make sure we get a cache hit when we reopen readers
    // that had no new deletions
    reader = refreshReader(reader);
    assertTrue(reader != oldReader);
    searcher.close();
    searcher = newSearcher(reader, false);
    int missCount = filter.missCount;
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
    assertEquals(missCount, filter.missCount);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));

    reader = refreshReader(reader);
    searcher.close();
    searcher = newSearcher(reader, false);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

    // NOTE: silliness to make sure JRE does not optimize
    // away our holding onto oldReader to prevent
    // CachingWrapperFilter's WeakHashMap from dropping the
    // entry:
    assertTrue(oldReader != null);

    searcher.close();
    writer.close();
    reader.close();
    dir.close();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // Create an index writer.
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random));

    // oldest doc:
    // Add the first document.  text = "Document 1"  dateTime = Oct 10 03:25:22 EDT 2007
    writer.addDocument(createDocument("Document 1", 1192001122000L));
    // Add the second document.  text = "Document 2"  dateTime = Oct 10 03:25:26 EDT 2007
    writer.addDocument(createDocument("Document 2", 1192001126000L));
    // Add the third document.  text = "Document 3"  dateTime = Oct 11 07:12:13 EDT 2007
    writer.addDocument(createDocument("Document 3", 1192101133000L));
    // Add the fourth document.  text = "Document 4"  dateTime = Oct 11 08:02:09 EDT 2007
    writer.addDocument(createDocument("Document 4", 1192104129000L));
    // latest doc:
    // Add the fifth document.  text = "Document 5"  dateTime = Oct 12 13:25:43 EDT 2007
    writer.addDocument(createDocument("Document 5", 1192209943000L));

    reader = writer.getReader();
    writer.close();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

  @Override
  public void setUp() throws Exception {
    super.setUp();
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));

    Document doc = new Document();
    doc.add(newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("sorter", "b", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(newField("field", "one two three four", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("sorter", "d", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(newField("field", "one two three y", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("sorter", "a", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(newField("field", "one two x", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(newField("sorter", "c", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);

    // tests here require a single segment (e.g. try seed
    // 8239472272678419952L), because SingleDocTestFilter(x)
    // blindly accepts that docID in any sub-segment
    writer.optimize();

    reader = writer.getReader();
    writer.close();

    searcher = newSearcher(reader);
    query = new TermQuery(new Term("field", "three"));
    filter = newStaticFilterB();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

  private static IndexSearcher searcher = null;
 
  @BeforeClass
  public static void beforeClass() throws Exception {
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))
        .setMergePolicy(newLogMergePolicy()));
   
    NumericField
      field8 = new NumericField("field8", 8, Field.Store.YES, true),
      field6 = new NumericField("field6", 6, Field.Store.YES, true),
      field4 = new NumericField("field4", 4, Field.Store.YES, true),
      field2 = new NumericField("field2", 2, Field.Store.YES, true),
      fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, Field.Store.YES, true),
      ascfield8 = new NumericField("ascfield8", 8, Field.Store.NO, true),
      ascfield6 = new NumericField("ascfield6", 6, Field.Store.NO, true),
      ascfield4 = new NumericField("ascfield4", 4, Field.Store.NO, true),
      ascfield2 = new NumericField("ascfield2", 2, Field.Store.NO, true);
   
    Document doc = new Document();
    // add fields that have a distance, to test general functionality
    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
   
    // Add a series of noDocs docs with increasing long values, by updating the fields
    for (int l = 0; l < noDocs; l++) {
      long val = distance * l + startOffset;
      field8.setLongValue(val);
      field6.setLongValue(val);
      field4.setLongValue(val);
      field2.setLongValue(val);
      fieldNoTrie.setLongValue(val);

      val = l - (noDocs / 2);
      ascfield8.setLongValue(val);
      ascfield6.setLongValue(val);
      ascfield4.setLongValue(val);
      ascfield2.setLongValue(val);
      writer.addDocument(doc);
    }
 
    reader = writer.getReader();
    searcher = newSearcher(reader);
    writer.close();
  }

Examples of org.apache.lucene.index.RandomIndexWriter

 
  @Override
  public void setUp() throws Exception {
    super.setUp();
    dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
   
    Document doc = new Document();
    Field bogus1 = newField("bogus", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
    Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
    Field bogus2 = newField("zbogus", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
    doc.add(field);
    doc.add(bogus1);
    doc.add(bogus2);
   
    int num = atLeast(2000);

    for (int i = 0; i < num; i++) {
      field.setValue(_TestUtil.randomUnicodeString(random, 10));
      bogus1.setValue(_TestUtil.randomUnicodeString(random, 10));
      bogus2.setValue(_TestUtil.randomUnicodeString(random, 10));
      writer.addDocument(doc);
    }
    reader = writer.getReader();
    searcher = newSearcher(reader);
    writer.close();
  }
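
Several of the snippets above are setUp() methods whose matching tearDown() is not shown. A plausible counterpart, assuming the same field names (searcher, reader, directory) and the Lucene 3.x APIs used throughout, simply closes resources in reverse order of acquisition; note that newDirectory() returns a MockDirectoryWrapper, whose close() fails the test if any files were left open.

    @Override
    public void tearDown() throws Exception {
      searcher.close();   // 3.x IndexSearcher is closeable
      reader.close();     // the reader outlives the writer that produced it
      directory.close();  // MockDirectoryWrapper checks for leaked handles
      super.tearDown();
    }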