Package org.apache.lucene.codecs.lucene41

Examples of org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat
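Lucene41PostingsFormat is the block postings format introduced in Lucene 4.1: it encodes documents, frequencies, positions and payloads in packed blocks of 128 and stores the terms dictionary as a block tree (.doc, .pos, .pay, .tim and .tip files). The test snippets below force it for every field via the TestUtil.alwaysPostingsFormat helper; in application code the usual hook is to subclass the codec and override getPostingsFormatForField. A minimal sketch, assuming Lucene 4.x on the classpath (the class name AllLucene41Codec is illustrative):

  import org.apache.lucene.codecs.PostingsFormat;
  import org.apache.lucene.codecs.lucene41.Lucene41Codec;
  import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;

  // Illustrative codec that answers Lucene41PostingsFormat for every field.
  public class AllLucene41Codec extends Lucene41Codec {
    private final PostingsFormat postings = new Lucene41PostingsFormat();

    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
      return postings;
    }
  }

  // usage (sketch): indexWriterConfig.setCodec(new AllLucene41Codec());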


  }
 
  // LUCENE-1609: don't load terms index
  public void testNoTermsIndex() throws Throwable {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
    Document doc = new Document();
    doc.add(newTextField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO));
    doc.add(newTextField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO));
    writer.addDocument(doc);
    writer.addDocument(doc);
    writer.close();
 
    DirectoryReader r = DirectoryReader.open(dir, -1);
    try {
      r.docFreq(new Term("field", "f"));
      fail("did not hit expected exception");
    } catch (IllegalStateException ise) {
      // expected
    }
 
    assertEquals(-1, ((SegmentReader) r.leaves().get(0).reader()).getTermInfosIndexDivisor());
    writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
            setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())).
            setMergePolicy(newLogMergePolicy(10))
    );
    writer.addDocument(doc);
    writer.close();
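In the snippet above, the second argument to DirectoryReader.open is the termInfosIndexDivisor: passing -1 tells the codec not to load the terms index at all, which is why the later docFreq call is expected to throw IllegalStateException and why getTermInfosIndexDivisor reports -1. A small sketch of the two open variants in the Lucene 4.x API (variable names are illustrative):

  DirectoryReader loaded  = DirectoryReader.open(dir);      // terms index loaded normally
  DirectoryReader skipped = DirectoryReader.open(dir, -1);  // -1: do not load the terms index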
 


   * @deprecated just to ensure IndexReader static methods work
   */
  @Deprecated
  public void testBackwards() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
    Document doc = new Document();
    doc.add(newTextField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO));
    doc.add(newTextField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO));
    writer.addDocument(doc);


  }

  public void testTermOrd() throws Exception {
    Directory d = newDirectory();
    IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
                                                             new MockAnalyzer(random())).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
    Document doc = new Document();
    doc.add(newTextField("f", "a b c", Field.Store.NO));
    w.addDocument(doc);
    w.forceMerge(1);
    DirectoryReader r = w.getReader();

    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    iwc.setMaxBufferedDocs(5);
    iwc.setMergeScheduler(new TrackingCMS());
    if (TestUtil.getPostingsFormat("id").equals("SimpleText")) {
      // not with SimpleText: it is far too slow for the 1000 documents indexed below,
      // so force the Lucene41 postings format instead
      iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
    }
    RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
    for(int i=0;i<1000;i++) {
      Document doc = new Document();
      doc.add(new StringField("id", ""+i, Field.Store.NO));

    counter = 0;
  }

  public void testSimpleSkip() throws IOException {
    Directory dir = new CountingRAMDirectory(new RAMDirectory());
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())).setMergePolicy(newLogMergePolicy()));
    Term term = new Term("test", "a");
    for (int i = 0; i < 5000; i++) {
      Document d1 = new Document();
      d1.add(newTextField(term.field(), term.text(), Field.Store.NO));
      writer.addDocument(d1);
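testSimpleSkip indexes the same term thousands of times so that the postings list grows multi-level skip data; Lucene41PostingsFormat writes postings in blocks of 128 documents and records a skip entry per block. That skip structure is what makes DocsEnum.advance() sub-linear. A small read-side sketch, assuming an IndexReader named reader over the index built above (the target document 4000 is arbitrary):

  // Sketch: advance() consults the skip list instead of scanning every posting.
  DocsEnum docs = MultiFields.getTermDocsEnum(reader, MultiFields.getLiveDocs(reader),
                                              "test", new BytesRef("a"));
  int docID = docs.advance(4000);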

    }
  }
 
  public TestBloomFilteredLucene41Postings() {
    super("TestBloomFilteredLucene41Postings");
    delegate = new BloomFilteringPostingsFormat(new Lucene41PostingsFormat(),
        new LowMemoryBloomFactory());
  }
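TestBloomFilteredLucene41Postings exercises BloomFilteringPostingsFormat, which wraps a delegate format and records a per-field Bloom filter so that lookups for terms that are definitely absent (the common case for primary-key style fields) can skip the terms dictionary entirely. A sketch of the same wrapping outside the test, assuming the lucene-codecs module is on the classpath:

  // Sketch: Lucene41 postings behind a Bloom filter; the one-argument constructor
  // falls back to the default BloomFilterFactory.
  PostingsFormat bloomed = new BloomFilteringPostingsFormat(new Lucene41PostingsFormat());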

    }
   
    Directory ramdir = new RAMDirectory();
    Analyzer analyzer = randomAnalyzer();
    IndexWriter writer = new IndexWriter(ramdir,
                                         new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
    Document doc = new Document();
    Field field1 = newTextField("foo", fooField.toString(), Field.Store.NO);
    Field field2 = newTextField("term", termField.toString(), Field.Store.NO);
    doc.add(field1);
    doc.add(field2);
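TEST_VERSION_CURRENT, randomAnalyzer() and newTextField are test-framework helpers; outside LuceneTestCase the equivalent setup pins a concrete Version and Analyzer and uses the plain TextField constructor. A minimal sketch, assuming Lucene 4.x (the version, analyzer and field contents are illustrative):

  Directory ramdir = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_46,
                                                 new StandardAnalyzer(Version.LUCENE_46));
  IndexWriter writer = new IndexWriter(ramdir, conf);
  Document doc = new Document();
  doc.add(new TextField("foo", "some text", Field.Store.NO));
  doc.add(new TextField("term", "some more text", Field.Store.NO));
  writer.addDocument(doc);
  writer.close();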

    int minItemsPerBlock = TestUtil.nextInt(random, 2, 100);
    int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random.nextInt(100);
    int lowFreqCutoff = TestUtil.nextInt(random, 2, 100);

    add(avoidCodecs,
        new Lucene41PostingsFormat(minItemsPerBlock, maxItemsPerBlock),
        new FSTPostingsFormat(),
        new FSTOrdPostingsFormat(),
        new FSTPulsing41PostingsFormat(1 + random.nextInt(20)),
        new FSTOrdPulsing41PostingsFormat(1 + random.nextInt(20)),
        new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock),
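The randomized values above feed the two-argument constructor, which controls the block tree terms dictionary rather than the postings themselves: minItemsPerBlock and maxItemsPerBlock bound how many terms go into each block, and BlockTreeTermsWriter requires maxItemsPerBlock >= 2 * (minItemsPerBlock - 1), which is exactly what the arithmetic on maxItemsPerBlock above guarantees. A sketch with fixed values (25 and 48 happen to be the defaults):

  // Sketch: terms-dictionary block sizing only; postings blocks stay fixed at 128 docs.
  PostingsFormat pf = new Lucene41PostingsFormat(25, 48);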

                                             state.context, new MockIntStreamFactory(random), state.segmentSuffix);
    } else {
      if (LuceneTestCase.VERBOSE) {
        System.out.println("MockRandomCodec: reading Standard postings");
      }
      postingsReader = new Lucene41PostingsReader(state.directory, state.fieldInfos, state.segmentInfo, state.context, state.segmentSuffix);
    }

    if (random.nextBoolean()) {
      final int totTFCutoff = _TestUtil.nextInt(random, 1, 20);
      if (LuceneTestCase.VERBOSE) {

    PostingsReaderBase docsReader = null;
    PostingsReaderBase pulsingReaderInner = null;
    PostingsReaderBase pulsingReader = null;
    boolean success = false;
    try {
      docsReader = new Lucene41PostingsReader(state.directory, state.fieldInfos, state.segmentInfo, state.context, state.segmentSuffix);
      pulsingReaderInner = new PulsingPostingsReader(docsReader);
      pulsingReader = new PulsingPostingsReader(pulsingReaderInner);
      FieldsProducer ret = new BlockTreeTermsReader(
                                                    state.directory, state.fieldInfos, state.segmentInfo,
                                                    pulsingReader,
