Package org.apache.lucene.store

Examples of org.apache.lucene.store.Directory
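The excerpts below are taken from the Apache Lucene test suite (3.x API) and show typical uses of org.apache.lucene.store.Directory: creating a Directory, opening IndexWriter and IndexReader instances against it, deleting index files through it directly, and closing it when finished. For orientation, here is a minimal, self-contained sketch of that basic pattern. It is an illustration only: it assumes the same Lucene 3.x API used in the excerpts, and it substitutes a plain RAMDirectory and an explicit Version constant for the test helpers (newDirectory(), TEST_VERSION_CURRENT) that appear below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class DirectoryBasics {
  public static void main(String[] args) throws Exception {
    // In-memory Directory; FSDirectory.open(...) would put the index on disk instead.
    Directory dir = new RAMDirectory();

    // Write a few documents through an IndexWriter bound to the Directory.
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_31, new StandardAnalyzer(Version.LUCENE_31)));
    for (int i = 0; i < 3; i++) {
      Document doc = new Document();
      doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    writer.close();

    // Read the same Directory back through an IndexReader/IndexSearcher.
    IndexReader reader = IndexReader.open(dir, true);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs hits = searcher.search(new TermQuery(new Term("content", "aaa")), 10);
    System.out.println("hits: " + hits.totalHits);

    searcher.close();
    reader.close();
    dir.close();
  }
}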


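    // Excerpt from a test method body (the trailing braces close the pass loop
    // and the method itself). Each pass rebuilds the index N+1 times under a
    // KeepLastNDeletionPolicy, then verifies through the Directory that only
    // the last N commits survive: a reader is opened on each commit point and
    // the corresponding segments_N file is deleted via dir.deleteFile().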
    for(int pass=0;pass<2;pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();

      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

      for(int j=0;j<N+1;j++) {
        IndexWriterConfig conf = newIndexWriterConfig(
            TEST_VERSION_CURRENT, new MockAnalyzer(random))
            .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
            .setMaxBufferedDocs(10);
        MergePolicy mp = conf.getMergePolicy();
        if (mp instanceof LogMergePolicy) {
          setUseCompoundFile(mp, useCompoundFile);
        }
        IndexWriter writer = new IndexWriter(dir, conf);
        for(int i=0;i<17;i++) {
          addDoc(writer);
        }
        writer.optimize();
        writer.close();
      }

      assertTrue(policy.numDelete > 0);
      assertEquals(N, policy.numOnInit);
      assertEquals(N+1, policy.numOnCommit);

      // Simplistic check: just verify that only the past N segments_N files
      // still exist, and that a reader can be opened on each:
      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
      for(int i=0;i<N+1;i++) {
        try {
          IndexReader reader = IndexReader.open(dir, true);
          reader.close();
          if (i == N) {
            fail("should have failed on commits prior to last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }


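      // Excerpt from inside a similar per-pass loop (the enclosing
      // "for(int pass=...)" line is not shown). Each of the N+1 cycles commits
      // twice against the same Directory: once when the IndexWriter closes
      // after adding 17 documents, and once when the IndexReader that deleted
      // a document and changed a norm closes. The final checks walk the
      // surviving commits backwards, recomputing the expected hit count.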
      boolean useCompoundFile = (pass % 2) != 0;

      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

      Directory dir = newDirectory();
      IndexWriterConfig conf = newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
      MergePolicy mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, useCompoundFile);
      }
      IndexWriter writer = new IndexWriter(dir, conf);
      writer.close();
      Term searchTerm = new Term("content", "aaa");       
      Query query = new TermQuery(searchTerm);

      for(int i=0;i<N+1;i++) {
        if (VERBOSE) {
          System.out.println("\nTEST: cycle i=" + i);
        }
        conf = newIndexWriterConfig(
            TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
        mp = conf.getMergePolicy();
        if (mp instanceof LogMergePolicy) {
          setUseCompoundFile(mp, useCompoundFile);
        }
        writer = new IndexWriter(dir, conf);
        writer.setInfoStream(VERBOSE ? System.out : null);
        for(int j=0;j<17;j++) {
          addDoc(writer);
        }
        // this is a commit
        if (VERBOSE) {
          System.out.println("TEST: close writer");
        }
        writer.close();
        IndexReader reader = IndexReader.open(dir, policy, false);
        reader.deleteDocument(3*i+1);
        reader.setNorm(4*i+1, "content", 2.0F);
        IndexSearcher searcher = newSearcher(reader);
        ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
        assertEquals(16*(1+i), hits.length);
        // this is a commit
        if (VERBOSE) {
          System.out.println("TEST: close reader numOnCommit=" + policy.numOnCommit);
        }
        reader.close();
        searcher.close();
      }
      conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, useCompoundFile);
      }
      IndexReader r = IndexReader.open(dir);
      final boolean wasOptimized = r.isOptimized();
      r.close();
      writer = new IndexWriter(dir, conf);
      writer.optimize();
      // this is a commit
      writer.close();

      assertEquals(2*(N+1)+1, policy.numOnInit);
      assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit);

      IndexSearcher searcher = new IndexSearcher(dir, false);
      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
      assertEquals(176, hits.length);

      // Simplistic check: just verify that only the past N segments_N files
      // still exist, and that a reader can be opened on each:
      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);

      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
      int expectedCount = 176;
      searcher.close();
      for(int i=0;i<N+1;i++) {
        try {
          IndexReader reader = IndexReader.open(dir, true);

          // Work backwards in commits on what the expected
          // count should be.
          searcher = newSearcher(reader);
          hits = searcher.search(query, null, 1000).scoreDocs;
          if (i > 1) {
            if (i % 2 == 0) {
              expectedCount += 1;
            } else {
              expectedCount -= 17;
            }
          }
          assertEquals(expectedCount, hits.length);
          searcher.close();
          reader.close();
          if (i == N) {
            fail("should have failed on commits before last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }
      dir.close();
    }
  }

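      // Excerpt from another per-pass loop. Each cycle appends 17 documents,
      // deletes one through an IndexReader, and then reopens the IndexWriter
      // with OpenMode.CREATE, wiping the index again; walking the surviving
      // commits backwards, the expected hit count therefore cycles through
      // 0, 16 and 17.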
      boolean useCompoundFile = (pass % 2) != 0;

      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

      Directory dir = newDirectory();
      IndexWriterConfig conf = newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
          .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, useCompoundFile);
      }
      IndexWriter writer = new IndexWriter(dir, conf);
      writer.close();
      Term searchTerm = new Term("content", "aaa");       
      Query query = new TermQuery(searchTerm);

      for(int i=0;i<N+1;i++) {

        conf = newIndexWriterConfig(
            TEST_VERSION_CURRENT, new MockAnalyzer(random))
            .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)
            .setMaxBufferedDocs(10);
        mp = conf.getMergePolicy();
        if (mp instanceof LogMergePolicy) {
          setUseCompoundFile(mp, useCompoundFile);
        }
        writer = new IndexWriter(dir, conf);
        for(int j=0;j<17;j++) {
          addDoc(writer);
        }
        // this is a commit
        writer.close();
        IndexReader reader = IndexReader.open(dir, policy, false);
        reader.deleteDocument(3);
        reader.setNorm(5, "content", 2.0F);
        IndexSearcher searcher = newSearcher(reader);
        ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
        assertEquals(16, hits.length);
        // this is a commit
        reader.close();
        searcher.close();

        writer = new IndexWriter(dir, newIndexWriterConfig(
            TEST_VERSION_CURRENT, new MockAnalyzer(random))
            .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy));
        // This will not commit: there are no changes
        // pending because we opened for "create":
        writer.close();
      }

      assertEquals(3*(N+1), policy.numOnInit);
      assertEquals(3*(N+1)+1, policy.numOnCommit);

      IndexSearcher searcher = new IndexSearcher(dir, false);
      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
      assertEquals(0, hits.length);

      // Simplistic check: just verify that only the past N segments_N files
      // still exist, and that a reader can be opened on each:
      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);

      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
      int expectedCount = 0;

      for(int i=0;i<N+1;i++) {
        try {
          IndexReader reader = IndexReader.open(dir, true);

          // Work backwards in commits on what the expected
          // count should be.
          searcher = newSearcher(reader);
          hits = searcher.search(query, null, 1000).scoreDocs;
          assertEquals(expectedCount, hits.length);
          searcher.close();
          if (expectedCount == 0) {
            expectedCount = 16;
          } else if (expectedCount == 16) {
            expectedCount = 17;
          } else if (expectedCount == 17) {
            expectedCount = 0;
          }
          reader.close();
          if (i == N) {
            fail("should have failed on commits before last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }
     
      dir.close();
    }
  }

  /**
   * Tests that norms values are preserved as the index is maintained,
   * including separate norms, merging indexes with separate norms, and
   * optimize().
   */
  public void testNorms() throws IOException {
    // test with a single index: index1
    Directory dir1 = newDirectory();
    IndexWriter.unlock(dir1);

    norms = new ArrayList<Float>();
    modifiedNorms = new ArrayList<Float>();

    createIndex(random, dir1);
    doTestNorms(random, dir1);

    // test with a single index: index2
    ArrayList<Float> norms1 = norms;
    ArrayList<Float> modifiedNorms1 = modifiedNorms;
    int numDocNorms1 = numDocNorms;

    norms = new ArrayList<Float>();
    modifiedNorms = new ArrayList<Float>();
    numDocNorms = 0;

    Directory dir2 = newDirectory();

    createIndex(random, dir2);
    doTestNorms(random, dir2);

    // add index1 and index2 to a third index: index3
    Directory dir3 = newDirectory();

    createIndex(random, dir3);
    IndexWriter iw = new IndexWriter(dir3, newIndexWriterConfig(
        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
                                     .setMaxBufferedDocs(5).setMergePolicy(newLogMergePolicy(3)));
    iw.addIndexes(new Directory[] { dir1, dir2 });
    iw.optimize();
    iw.close();

    norms1.addAll(norms);
    norms = norms1;
    modifiedNorms1.addAll(modifiedNorms);
    modifiedNorms = modifiedNorms1;
    numDocNorms += numDocNorms1;

    // test with index3
    verifyIndex(dir3);
    doTestNorms(random, dir3);

    // now with optimize
    iw = new IndexWriter(dir3, newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr)
        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(5)
        .setMergePolicy(newLogMergePolicy(3)));
    iw.optimize();
    iw.close();
    verifyIndex(dir3);

    dir1.close();
    dir2.close();
    dir3.close();
  }

 
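  // Cloned SegmentReaders share a reference-counted norms buffer: cloning
  // bumps the count to 2, closing the original drops it back to 1, and the
  // clone can still read norms from the same Directory afterwards.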
  public void testNormsClose() throws IOException {
    Directory dir1 = newDirectory();
    TestIndexReaderReopen.createIndex(random, dir1, false);
    SegmentReader reader1 = SegmentReader.getOnlySegmentReader(dir1);
    reader1.norms("field1");
    SegmentNorms r1norm = reader1.norms.get("field1");
    AtomicInteger r1BytesRef = r1norm.bytesRef();
    SegmentReader reader2 = (SegmentReader)reader1.clone();
    assertEquals(2, r1norm.bytesRef().get());
    reader1.close();
    assertEquals(1, r1BytesRef.get());
    reader2.norms("field1");
    reader2.close();
    dir1.close();
  }

 
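  // Follows the norms reference count through a chain of cloned readers on one
  // Directory: each clone shares the same byte buffer (count 2, 3, 4), a
  // setNorm() call on a clone gives it a private copy (the shared count drops
  // back to 3, the copy starts at 1), and a setNorm() on another clone hits
  // the expected "cannot update" exception.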
  public void testNormsRefCounting() throws IOException {
    Directory dir1 = newDirectory();
    TestIndexReaderReopen.createIndex(random, dir1, false);
    IndexReader reader1 = IndexReader.open(dir1, false);
       
    IndexReader reader2C = (IndexReader) reader1.clone();
    SegmentReader segmentReader2C = SegmentReader.getOnlySegmentReader(reader2C);
    segmentReader2C.norms("field1"); // load the norms for the field
    SegmentNorms reader2CNorm = segmentReader2C.norms.get("field1");
    assertTrue("reader2CNorm.bytesRef()=" + reader2CNorm.bytesRef(), reader2CNorm.bytesRef().get() == 2);
   
   
   
    IndexReader reader3C = (IndexReader) reader2C.clone();
    SegmentReader segmentReader3C = SegmentReader.getOnlySegmentReader(reader3C);
    SegmentNorms reader3CCNorm = segmentReader3C.norms.get("field1");
    assertEquals(3, reader3CCNorm.bytesRef().get());
   
    // edit a norm and the refcount should be 1
    IndexReader reader4C = (IndexReader) reader3C.clone();
    SegmentReader segmentReader4C = SegmentReader.getOnlySegmentReader(reader4C);
    assertEquals(4, reader3CCNorm.bytesRef().get());
    reader4C.setNorm(5, "field1", 0.33f);
   
    // generate a cannot update exception in reader1
    try {
      reader3C.setNorm(1, "field1", 0.99f);
      fail("did not hit expected exception");
    } catch (Exception ex) {
      // expected
    }
   
    // norm values should be different
    assertTrue(Similarity.getDefault().decodeNormValue(segmentReader3C.norms("field1")[5])
        != Similarity.getDefault().decodeNormValue(segmentReader4C.norms("field1")[5]));
    SegmentNorms reader4CCNorm = segmentReader4C.norms.get("field1");
    assertEquals(3, reader3CCNorm.bytesRef().get());
    assertEquals(1, reader4CCNorm.bytesRef().get());
       
    IndexReader reader5C = (IndexReader) reader4C.clone();
    SegmentReader segmentReader5C = SegmentReader.getOnlySegmentReader(reader5C);
    SegmentNorms reader5CCNorm = segmentReader5C.norms.get("field1");
    reader5C.setNorm(5, "field1", 0.7f);
    assertEquals(1, reader5CCNorm.bytesRef().get());

    reader5C.close();
    reader4C.close();
    reader3C.close();
    reader2C.close();
    reader1.close();
    dir1.close();
  }


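  // Builds a 20-document index in a Directory, constructs an InstantiatedIndex
  // from an IndexReader opened on that Directory, and checks that the two
  // behave identically.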
  public void testLoadIndexReader() throws Exception {
    Directory dir = newDirectory();

    // create dir data
    IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
   
    for (int i = 0; i < 20; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      indexWriter.addDocument(document);
    }
    indexWriter.close();

    // test load ii from index reader
    IndexReader ir = IndexReader.open(dir, false);
    InstantiatedIndex ii = new InstantiatedIndex(ir);
    ir.close();

    testEqualBehaviour(dir, ii);
    dir.close();
  }


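  // Indexes the same 500 documents twice: once through an IndexWriter on a
  // Directory and once through an InstantiatedIndexWriter seeded with the same
  // Random (so both produce identical payload data), then compares the two
  // indexes for equal behaviour.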
  public void testInstantiatedIndexWriter() throws Exception {

    Directory dir = newDirectory();
    InstantiatedIndex ii = new InstantiatedIndex();
   
    // Pass the same random seed to both writers so they index identical payload data.
    long seed = random.nextLong();
   
    // create dir data
    IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)))
        .setMergePolicy(newLogMergePolicy()));
    indexWriter.setInfoStream(VERBOSE ? System.out : null);
    if (VERBOSE) {
      System.out.println("TEST: make test index");
    }
    for (int i = 0; i < 500; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      indexWriter.addDocument(document);
    }
    indexWriter.close();

    // test ii writer
    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new MockAnalyzer(new Random(seed)), true);
    for (int i = 0; i < 500; i++) {
      Document document = new Document();
      assembleDocument(document, i);
      instantiatedIndexWriter.addDocument(document);
    }
    instantiatedIndexWriter.close();


    testEqualBehaviour(dir, ii);

    dir.close();

  }

public class TestRollback extends LuceneTestCase {

  // LUCENE-2536
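  // Commits five documents, then updates three of them with a writer whose
  // small RAM buffer forces intermediate flushes, and finally rolls back; the
  // Directory must still contain exactly the five originally committed documents.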
  public void testRollbackIntegrityWithBufferFlush() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter rw = new RandomIndexWriter(random, dir);

    for (int i = 0; i < 5; i++) {
      Document doc = new Document();
      doc.add(newField("pk", Integer.toString(i), Store.YES, Index.ANALYZED_NO_NORMS));
      rw.addDocument(doc);
    }
    rw.close();

    // If buffer size is small enough to cause a flush, errors ensue...
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(2).setOpenMode(IndexWriterConfig.OpenMode.APPEND));

    Term pkTerm = new Term("pk", "");
    for (int i = 0; i < 3; i++) {
      Document doc = new Document();
      String value = Integer.toString(i);
      doc.add(newField("pk", value, Store.YES, Index.ANALYZED_NO_NORMS));
      doc.add(newField("text", "foo", Store.YES, Index.ANALYZED_NO_NORMS));
      w.updateDocument(pkTerm.createTerm(value), doc);
    }
    w.rollback();

    IndexReader r = IndexReader.open(dir, true);
    assertEquals("index should contain same number of docs post rollback", 5, r.numDocs());
    r.close();
    dir.close();
  }


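  // Builds two single-document indexes in separate Directory instances,
  // searches them through a MultiSearcher with a wildcard query, and
  // highlights the hits after rewriting and combining the query against
  // both readers.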
  public void testMultiSearcher() throws Exception {
    // setup index 1
    Directory ramDir1 = newDirectory();
    IndexWriter writer1 = new IndexWriter(ramDir1, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
    Document d = new Document();
    Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
    d.add(f);
    writer1.addDocument(d);
    writer1.optimize();
    writer1.close();
    IndexReader reader1 = IndexReader.open(ramDir1, true);

    // setup index 2
    Directory ramDir2 = newDirectory();
    IndexWriter writer2 = new IndexWriter(ramDir2, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
    d = new Document();
    f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
    d.add(f);
    writer2.addDocument(d);
    writer2.optimize();
    writer2.close();
    IndexReader reader2 = IndexReader.open(ramDir2, true);

    IndexSearcher searchers[] = new IndexSearcher[2];
    searchers[0] = new IndexSearcher(ramDir1, true);
    searchers[1] = new IndexSearcher(ramDir2, true);
    MultiSearcher multiSearcher = new MultiSearcher(searchers);
    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT));
    parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
    query = parser.parse("multi*");
    if (VERBOSE) System.out.println("Searching for: " + query.toString(FIELD_NAME));
    // at this point the multisearcher calls combine(query[])
    hits = multiSearcher.search(query, null, 1000);

    // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
    Query expandedQueries[] = new Query[2];
    expandedQueries[0] = query.rewrite(reader1);
    expandedQueries[1] = query.rewrite(reader2);
    query = query.combine(expandedQueries);

    // create an instance of the highlighter with the tags used to surround
    // highlighted text
    Highlighter highlighter = new Highlighter(this, new QueryTermScorer(query));

    for (int i = 0; i < hits.totalHits; i++) {
      String text = multiSearcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
      String highlightedText = highlighter.getBestFragment(tokenStream, text);
      if (VERBOSE) System.out.println(highlightedText);
    }
    assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
        numHighlights == 2);
    reader1.close();
    reader2.close();
    searchers[0].close();
    searchers[1].close();
    ramDir1.close();
    ramDir2.close();
  }
