Package org.apache.lucene.index

Examples of org.apache.lucene.index.IndexWriter$MaxFieldLength
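
Every snippet on this page passes an IndexWriter.MaxFieldLength to the IndexWriter constructor. In Lucene 2.9/3.x, MaxFieldLength.LIMITED truncates each field after the first 10,000 terms (IndexWriter.DEFAULT_MAX_FIELD_LENGTH), MaxFieldLength.UNLIMITED indexes every term (Integer.MAX_VALUE), and new IndexWriter.MaxFieldLength(int) sets a custom limit. A minimal sketch of the three variants (the directory and analyzer choices here are illustrative, not taken from any snippet below):

import java.io.IOException;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;

public class MaxFieldLengthDemo {
  public static void main(String[] args) throws IOException {
    // LIMITED truncates each field after 10,000 terms
    IndexWriter limited = new IndexWriter(new RAMDirectory(),
        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    limited.close();

    // UNLIMITED indexes every term, however large the field is
    IndexWriter unlimited = new IndexWriter(new RAMDirectory(),
        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    unlimited.close();

    // an explicit per-field term limit
    IndexWriter custom = new IndexWriter(new RAMDirectory(),
        new WhitespaceAnalyzer(), true, new IndexWriter.MaxFieldLength(5000));
    custom.close();
  }
}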



  // Builds a RAMDirectory containing numDeletedDocs documents, every one of
  // which has been deleted: maxDoc() == numDeletedDocs, numDocs() == 0.
  private static RAMDirectory makeEmptyIndex(final int numDeletedDocs)
    throws IOException {
      RAMDirectory d = new RAMDirectory();
      IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(), true,
                                      MaxFieldLength.LIMITED);
      for (int i = 0; i < numDeletedDocs; i++) {
        w.addDocument(new Document());
      }
      w.commit();
      w.deleteDocuments( new MatchAllDocsQuery() );
      w.commit();

      if (0 < numDeletedDocs)
        Assert.assertTrue("writer has no deletions", w.hasDeletions());

      Assert.assertEquals("writer is missing some deleted docs",
                          numDeletedDocs, w.maxDoc());
      Assert.assertEquals("writer has non-deleted docs",
                          0, w.numDocs());
      w.close();
      IndexReader r = IndexReader.open(d, true);
      Assert.assertEquals("reader has wrong number of deleted docs",
                          numDeletedDocs, r.numDeletedDocs());
      r.close();
      return d;
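A hypothetical call site for this helper, restating the guarantees asserted above:

    // an index holding 5 documents, all deleted: maxDoc() == 5, numDocs() == 0
    RAMDirectory emptyDir = makeEmptyIndex(5);
    IndexReader r = IndexReader.open(emptyDir, true);
    Assert.assertEquals(5, r.numDeletedDocs());
    r.close();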


    // plan to add a set of useful stopwords, consider changing some of the
    // interior filters.
    StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet());
    // TODO: something about lock timeouts and leftover locks.
    IndexWriter writer = new IndexWriter(storeDirectory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
    // note: this searcher sees only the point-in-time state of the newly
    // created (still empty) index; it will not see the documents added below
    IndexSearcher searcher = new IndexSearcher(storeDirectory, true);
   
    for(int dx = 0; dx < 1000; dx ++) {
      String f = randomField();
      Document doc = new Document();
      doc.add(new Field("data", f, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
   
    searcher.close();
    writer.close();
    rmDir(new File(storePathname));
  }

import org.apache.lucene.util.OpenBitSetDISI;

public class TestCachingWrapperFilter extends LuceneTestCase {
  public void testCachingWorks() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);

    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    // first time through, the nested filter must be consulted
    cacher.getDocIdSet(reader);
    assertTrue("first time", filter.wasCalled());

    // second time through, the cached DocIdSet is reused; the nested
    // filter must not be called again
    filter.clear();
    cacher.getDocIdSet(reader);
    assertFalse("second time", filter.wasCalled());

    reader.close();
  }
 
  public void testNullDocIdSet() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);

    final Filter filter = new Filter() {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) {
        return null; // a null DocIdSet should be cached as the empty set
      }
    };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // the caching filter should return the empty-set constant
    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));

    reader.close();
  }
 
  public void testNullDocIdSetIterator() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);

    final Filter filter = new Filter() {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) {
        // return a DocIdSet whose iterator() is null; the cache must treat
        // this the same as an empty result
        return new DocIdSet() {
          @Override
          public DocIdSetIterator iterator() {
            return null;
          }
        };
      }
    };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // the caching filter should return the empty-set constant
    assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));

    reader.close();
  }
 
  public void testIsCacheAble() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);

    // not cacheable:
    assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);

    reader.close();
  }
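
The assertDocIdSetCacheable helper is not shown in the excerpt; a minimal sketch of what it plausibly does, consistent with the OpenBitSetDISI import above (the exact assertions in the full test may differ):

  private static void assertDocIdSetCacheable(IndexReader reader, Filter filter,
      boolean shouldCacheable) throws IOException {
    final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    final DocIdSet originalSet = filter.getDocIdSet(reader);
    final DocIdSet cachedSet = cacher.getDocIdSet(reader);
    assertEquals(shouldCacheable, originalSet.isCacheable());
    assertTrue("cached set must always be cacheable", cachedSet.isCacheable());
    if (!originalSet.isCacheable()) {
      // non-cacheable sets are copied into an OpenBitSetDISI before caching
      assertTrue("cached set should be an OpenBitSetDISI",
                 cachedSet instanceof OpenBitSetDISI);
    }
  }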

  public void testEnforceDeletions() throws Exception {
    Directory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    IndexReader reader = writer.getReader();
    IndexSearcher searcher = new IndexSearcher(reader);

    // add a doc, refresh the reader, and check that it's there
    Document doc = new Document();
    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);

    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);

    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
    assertEquals("Should find a hit...", 1, docs.totalHits);

    final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

    // ignore deletions
    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE);
       
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
    ConstantScoreQuery constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));

    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);


    // force cache to regenerate:
    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.RECACHE);

    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);
       
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);

    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // make sure we get a cache hit when we reopen reader
    // that had no change to deletions
    IndexReader newReader = refreshReader(reader);
    assertTrue(reader != newReader);
    reader = newReader;
    searcher = new IndexSearcher(reader);
    int missCount = filter.missCount;
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
    assertEquals(missCount, filter.missCount);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));

    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);

    missCount = filter.missCount;
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals(missCount+1, filter.missCount);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);


    // apply deletions dynamically
    filter = new CachingWrapperFilter(startFilter, CachingWrapperFilter.DeletesMode.DYNAMIC);

    writer.addDocument(doc);
    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);
       
    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.search(constantScore, 1);
    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.deleteDocuments(new Term("id", "1"));

    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);

    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

    reader.close();
    writer.close();
    dir.close();
  }
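
The refreshReader helper used throughout this test is not shown in the excerpt; a minimal sketch consistent with how it is called (reopen the reader, closing the old instance only if reopen() returned a new one):

  private static IndexReader refreshReader(IndexReader reader) throws IOException {
    IndexReader oldReader = reader;
    reader = reader.reopen();
    if (reader != oldReader) {
      // reopen() returned a new reader, so the old one can be released
      oldReader.close();
    }
    return reader;
  }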

  public void testLoadIndexReader() throws Exception {
    RAMDirectory dir = new RAMDirectory();

    // create dir data
    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
    for (int i = 0; i < 20; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      indexWriter.addDocument(document);
    }
    indexWriter.close();

    // test load ii from index reader
    IndexReader ir = IndexReader.open(dir, false);
    InstantiatedIndex ii = new InstantiatedIndex(ir);
    ir.close();
  }

    RAMDirectory dir = new RAMDirectory();
    InstantiatedIndex ii = new InstantiatedIndex();

    // create dir data
    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
    for (int i = 0; i < 500; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      indexWriter.addDocument(document);
    }
    indexWriter.close();

    // test ii writer
    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true);
    for (int i = 0; i < 500; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      instantiatedIndexWriter.addDocument(document);
    }
    instantiatedIndexWriter.close();
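
Once both indexes are populated, the in-memory InstantiatedIndex is searched through its own reader. A brief sketch, assuming the contrib/instantiated API of the same Lucene version (the field and term names here are made up):

    InstantiatedIndexReader iiReader = new InstantiatedIndexReader(ii);
    IndexSearcher iiSearcher = new IndexSearcher(iiReader);
    TopDocs td = iiSearcher.search(new TermQuery(new Term("field", "value")), 10);
    iiSearcher.close();
    iiReader.close();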

        dir.setLockFactory(lf);

        // Lock prefix should have been set:
        assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet);

        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.LIMITED);

        // add 100 documents
        for (int i = 0; i < 100; i++) {
            addDoc(writer);
        }

        // Only the write lock should have been created:
        assertEquals("# of unique locks created (after instantiating IndexWriter)",
                     1, lf.locksCreated.size());
        assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
                   lf.makeLockCount >= 1);
       
        for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
            String lockName = (String) e.next();
            MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
            assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
                       lock.lockAttempts > 0);
        }
       
        writer.close();
    }
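
MockLockFactory is a test-local class not shown in the excerpt; a minimal sketch consistent with the members the assertions touch (lockPrefixSet, locksCreated, makeLockCount, and MockLock.lockAttempts):

    class MockLockFactory extends LockFactory {
        public boolean lockPrefixSet;
        public Map<String, Lock> locksCreated =
            Collections.synchronizedMap(new HashMap<String, Lock>());
        public int makeLockCount;

        @Override
        public void setLockPrefix(String lockPrefix) {
            super.setLockPrefix(lockPrefix);
            lockPrefixSet = true;        // checked by the first assertion above
        }

        @Override
        public synchronized Lock makeLock(String lockName) {
            Lock lock = new MockLock();
            locksCreated.put(lockName, lock);
            makeLockCount++;
            return lock;
        }

        @Override
        public void clearLock(String lockName) {}

        class MockLock extends Lock {
            public int lockAttempts;

            @Override
            public boolean obtain() {
                lockAttempts++;          // counted by the loop over locksCreated
                return true;
            }
            @Override
            public void release() {}
            @Override
            public boolean isLocked() { return false; }
        }
    }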
