Package org.apache.lucene.store

Examples of org.apache.lucene.store.Directory
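A Directory is Lucene's abstraction over a flat namespace of files: RAMDirectory keeps them on the heap, while FSDirectory maps them to a filesystem folder. Before the test snippets below, here is a minimal self-contained sketch of the raw Directory API, assuming the Lucene 3.x signatures these snippets use (in 4.x the same methods take an extra IOContext argument):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class DirectoryBasics {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();

    // Write a file through the Directory abstraction.
    IndexOutput out = dir.createOutput("hello.bin");
    out.writeString("hello, directory");
    out.close();

    // Read it back.
    IndexInput in = dir.openInput("hello.bin");
    System.out.println(in.readString());
    in.close();

    // Enumerate files and clean up.
    for (String name : dir.listAll()) {
      System.out.println(name + ": " + dir.fileLength(name) + " bytes");
    }
    dir.deleteFile("hello.bin");
    dir.close();
  }
}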


  public void testRandomIWReader() throws Throwable {
    Directory dir = newDirectory();
   
    // TODO: verify equals using IW.getReader
    DocsAndWriter dw = indexRandomIWReader(5, 3, 100, dir);
    IndexReader reader = dw.writer.getReader();
    dw.writer.commit();
    verifyEquals(random, reader, dir, "id");
    reader.close();
    dw.writer.close();
    dir.close();
  }


  public void testRandom() throws Throwable {
    Directory dir1 = newDirectory();
    Directory dir2 = newDirectory();
    // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
    int maxThreadStates = 1+random.nextInt(10);
    boolean doReaderPooling = random.nextBoolean();
    Map<String,Document> docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
    indexSerial(random, docs, dir2);

    // sanity check (disabled): an index should always verify as equal to itself
    // verifyEquals(dir1, dir1, "id");
    // verifyEquals(dir2, dir2, "id");

    verifyEquals(dir1, dir2, "id");
    dir1.close();
    dir2.close();
  }

      // (loop body from testMultiConfig: each iteration re-runs the
      // random-vs-serial comparison with fresh random parameters)
      seed++;

      int nThreads=random.nextInt(5)+1;
      int iter=random.nextInt(5)+1;
      int range=random.nextInt(20)+1;
      Directory dir1 = newDirectory();
      Directory dir2 = newDirectory();
      if (VERBOSE) {
        System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
      }
      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
      if (VERBOSE) {
        System.out.println("TEST: index serial");
      }
      indexSerial(random, docs, dir2);
      if (VERBOSE) {
        System.out.println("TEST: verify");
      }
      verifyEquals(dir1, dir2, "id");
      dir1.close();
      dir2.close();
    }
  }

  // LUCENE-1130: make sure we can close() even while threads are
  // trying to add documents; strictly speaking this isn't a valid
  // use of Lucene's APIs, but we still want to be robust to this case:
  public void testCloseWithThreads() throws Exception {
    int NUM_THREADS = 3;

    for(int iter=0;iter<7;iter++) {
      Directory dir = newDirectory();
      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy(4));
      // We expect AlreadyClosedException
      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
      IndexWriter writer = new IndexWriter(dir, conf);

      IndexerThread[] threads = new IndexerThread[NUM_THREADS];

      for(int i=0;i<NUM_THREADS;i++)
        threads[i] = new IndexerThread(writer, false);

      for(int i=0;i<NUM_THREADS;i++)
        threads[i].start();

      boolean done = false;
      while(!done) {
        Thread.sleep(100);
        for(int i=0;i<NUM_THREADS;i++)
          // only stop when at least one thread has added a doc
          if (threads[i].addCount > 0) {
            done = true;
            break;
          }
      }

      writer.close(false);

      // Make sure threads that are adding docs are not hung:
      for(int i=0;i<NUM_THREADS;i++) {
        // Without fix for LUCENE-1130: one of the
        // threads will hang
        threads[i].join();
        if (threads[i].isAlive())
          fail("thread seems to be hung");
      }

      // Quick test to make sure index is not corrupt:
      IndexReader reader = IndexReader.open(dir, true);
      TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
      int count = 0;
      while(tdocs.next()) {
        count++;
      }
      assertTrue(count > 0);
      reader.close();
     
      dir.close();
    }
  }


    public void testIndexWriterLockRelease() throws IOException {
        Directory dir = newFSDirectory(this.__test_dir);
        try {
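          // Opening an empty directory in APPEND mode is expected to throw
          // FileNotFoundException; the retry below verifies that the failed
          // constructor released the Directory's write lock.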
          new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
              new StandardAnalyzer(TEST_VERSION_CURRENT))
          .setOpenMode(OpenMode.APPEND));
        } catch (FileNotFoundException e) {
            try {
              new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
                  new StandardAnalyzer(TEST_VERSION_CURRENT))
              .setOpenMode(OpenMode.APPEND));
            } catch (FileNotFoundException e1) {
            }
        } finally {
          dir.close();
        }
    }
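The lock-release test above exercises Directory's write lock: every IndexWriter acquires a lock inside its Directory, and that lock must be released even when the constructor throws. A short sketch of inspecting and clearing a stale lock, assuming the Lucene 3.x static helpers IndexWriter.isLocked and IndexWriter.unlock (only safe when no other writer is actually running, e.g. after a crash):

import java.io.File;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ClearStaleLock {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File(args[0]));
    if (IndexWriter.isLocked(dir)) {
      // Forcibly release a leftover write.lock; assumes no live writer.
      IndexWriter.unlock(dir);
    }
    dir.close();
  }
}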


  public void testLongPostings() throws Exception {
    // Don't use _TestUtil.getTempDir so that we own the
    // randomness (ie same seed will point to same dir):
    Directory dir = newFSDirectory(_TestUtil.getTempDir("longpostings" + "." + random.nextLong()));

    final int NUM_DOCS = atLeast(2000);

    if (VERBOSE) {
      System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
    }

    final String s1 = getRandomTerm(null);
    final String s2 = getRandomTerm(s1);

    if (VERBOSE) {
      System.out.println("\nTEST: s1=" + s1 + " s2=" + s2);
      /*
      for(int idx=0;idx<s1.length();idx++) {
        System.out.println("  s1 ch=0x" + Integer.toHexString(s1.charAt(idx)));
      }
      for(int idx=0;idx<s2.length();idx++) {
        System.out.println("  s2 ch=0x" + Integer.toHexString(s2.charAt(idx)));
      }
      */
    }

    final OpenBitSet isS1 = new OpenBitSet(NUM_DOCS);
    for(int idx=0;idx<NUM_DOCS;idx++) {
      if (random.nextBoolean()) {
        isS1.set(idx);
      }
    }

    final IndexReader r;
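    // Note: the else branch below is unreachable as written; flipping the
    // condition would reopen an index already on disk instead of building one.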
    if (true) {
      final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
        .setMergePolicy(newLogMergePolicy());
      iwc.setRAMBufferSizeMB(16.0 + 16.0 * random.nextDouble());
      iwc.setMaxBufferedDocs(-1);
      final RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc);

      for(int idx=0;idx<NUM_DOCS;idx++) {
        final Document doc = new Document();
        String s = isS1.get(idx) ? s1 : s2;
        final Field f = newField("field", s, Field.Index.ANALYZED);
        final int count = _TestUtil.nextInt(random, 1, 4);
        for(int ct=0;ct<count;ct++) {
          doc.add(f);
        }
        riw.addDocument(doc);
      }

      r = riw.getReader();
      riw.close();
    } else {
      r = IndexReader.open(dir);
    }

    /*
    if (VERBOSE) {
      System.out.println("TEST: terms");
      TermEnum termEnum = r.terms();
      while(termEnum.next()) {
        System.out.println("  term=" + termEnum.term() + " len=" + termEnum.term().text().length());
        assertTrue(termEnum.docFreq() > 0);
        System.out.println("    s1?=" + (termEnum.term().text().equals(s1)) + " s1len=" + s1.length());
        System.out.println("    s2?=" + (termEnum.term().text().equals(s2)) + " s2len=" + s2.length());
        final String s = termEnum.term().text();
        for(int idx=0;idx<s.length();idx++) {
          System.out.println("      ch=0x" + Integer.toHexString(s.charAt(idx)));
        }
      }
    }
    */

    assertEquals(NUM_DOCS, r.numDocs());
    assertTrue(r.docFreq(new Term("field", s1)) > 0);
    assertTrue(r.docFreq(new Term("field", s2)) > 0);

    final byte[] payload = new byte[100];

    int num = atLeast(1000);
    for(int iter=0;iter<num;iter++) {

      final String term;
      final boolean doS1;
      if (random.nextBoolean()) {
        term = s1;
        doS1 = true;
      } else {
        term = s2;
        doS1 = false;
      }

      if (VERBOSE) {
        System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
      }
       
      final TermPositions postings = r.termPositions(new Term("field", term));

      int docID = -1;
      while(docID < Integer.MAX_VALUE) {
        final int what = random.nextInt(3);
        if (what == 0) {
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do next()");
          }
          // nextDoc
          int expected = docID+1;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
          boolean result = postings.next();
          if (!result) {
            assertEquals(Integer.MAX_VALUE, expected);
            if (VERBOSE) {
              System.out.println("  end");
            }
            break;
          } else {
            docID = postings.doc();
            if (VERBOSE) {
              System.out.println("  got docID=" + docID);
            }
            assertEquals(expected, docID);

            if (random.nextInt(6) == 3) {
              final int freq = postings.freq();
              assertTrue(freq >= 1 && freq <= 4);
              for(int pos=0;pos<freq;pos++) {
                assertEquals(pos, postings.nextPosition());
                if (random.nextBoolean() && postings.isPayloadAvailable()) {
                  postings.getPayload(payload, 0);
                }
              }
            }
          }
        } else {
          // advance
          final int targetDocID;
          if (docID == -1) {
            targetDocID = random.nextInt(NUM_DOCS+1);
          } else {
            targetDocID = docID + _TestUtil.nextInt(random, 1, NUM_DOCS - docID);
          }
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do skipTo(" + targetDocID + ")");
          }
          int expected = targetDocID;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
         
          final boolean result = postings.skipTo(targetDocID);
          if (!result) {
            assertEquals(Integer.MAX_VALUE, expected);
            if (VERBOSE) {
              System.out.println("  end");
            }
            break;
          } else {
            docID = postings.doc();
            if (VERBOSE) {
              System.out.println("  got docID=" + docID);
            }
            assertEquals(expected, docID);
         
            if (random.nextInt(6) == 3) {
              final int freq = postings.freq();
              assertTrue(freq >= 1 && freq <= 4);
              for(int pos=0;pos<freq;pos++) {
                assertEquals(pos, postings.nextPosition());
                if (random.nextBoolean() && postings.isPayloadAvailable()) {
                  postings.getPayload(payload, 0);
                }
              }
            }
          }
        }
      }
    }
    r.close();
    dir.close();
  }

      System.out.println("ERROR: cannot specify both -fix and -segment");
      System.exit(1);
    }

    System.out.println("\nOpening index @ " + indexPath + "\n");
    Directory dir = null;
    try {
      dir = FSDirectory.open(new File(indexPath));
    } catch (Throwable t) {
      System.out.println("ERROR: could not open directory \"" + indexPath + "\"; exiting");
      t.printStackTrace(System.out);

    public void testEmptyIndex()
        throws Exception
    {
        // creating two directories for indices
        Directory indexStoreA = new MockRAMDirectory();
        Directory indexStoreB = new MockRAMDirectory();

        // creating a document to store
        Document lDoc = new Document();
        lDoc.add(new Field("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED));
        lDoc.add(new Field("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        lDoc.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));

        // creating a document to store
        Document lDoc2 = new Document();
        lDoc2.add(new Field("fulltext", "in a galaxy far far away.....",
            Field.Store.YES, Field.Index.ANALYZED));
        lDoc2.add(new Field("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED));
        lDoc2.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));

        // creating a document to store
        Document lDoc3 = new Document();
        lDoc3.add(new Field("fulltext", "a bizarre bug manifested itself....",
            Field.Store.YES, Field.Index.ANALYZED));
        lDoc3.add(new Field("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED));
        lDoc3.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));

        // creating an index writer for the first index
        IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
        // creating an index writer for the second index, but writing nothing
        IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);

        //--------------------------------------------------------------------
        // scenario 1
        //--------------------------------------------------------------------

        // writing the documents to the first index
        writerA.addDocument(lDoc);
        writerA.addDocument(lDoc2);
        writerA.addDocument(lDoc3);
        writerA.optimize();
        writerA.close();

        // closing the second index
        writerB.close();

        // creating the query
        QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
        Query query = parser.parse("handle:1");

        // building the searchables
        Searcher[] searchers = new Searcher[2];
        // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
        searchers[0] = new IndexSearcher(indexStoreB, true);
        searchers[1] = new IndexSearcher(indexStoreA, true);
        // creating the multiSearcher
        Searcher mSearcher = getMultiSearcherInstance(searchers);
        // performing the search
        ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;

        assertEquals(3, hits.length);

        // iterating over the hit documents
        for (int i = 0; i < hits.length; i++) {
          mSearcher.doc(hits[i].doc);
        }
        mSearcher.close();


        //--------------------------------------------------------------------
        // scenario 2
        //--------------------------------------------------------------------

        // adding one document to the empty index
        writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
        writerB.addDocument(lDoc);
        writerB.optimize();
        writerB.close();

        // building the searchables
        Searcher[] searchers2 = new Searcher[2];
        // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
        searchers2[0] = new IndexSearcher(indexStoreB, true);
        searchers2[1] = new IndexSearcher(indexStoreA, true);
        // creating the multiSearcher
        MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
        // performing the same search
        ScoreDoc[] hits2 = mSearcher2.search(query, null, 1000).scoreDocs;

        assertEquals(4, hits2.length);

        // iterating over the hit documents
        for (int i = 0; i < hits2.length; i++) {
          // no exception should happen at this point
          mSearcher2.doc(hits2[i].doc);
        }

        // test the subSearcher() method:
        Query subSearcherQuery = parser.parse("id:doc1");
        hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
        assertEquals(2, hits2.length);
        assertEquals(0, mSearcher2.subSearcher(hits2[0].doc));   // hit from searchers2[0]
        assertEquals(1, mSearcher2.subSearcher(hits2[1].doc));   // hit from searchers2[1]
        subSearcherQuery = parser.parse("id:doc2");
        hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
        assertEquals(1, hits2.length);
        assertEquals(1, mSearcher2.subSearcher(hits2[0].doc));   // hit from searchers2[1]
        mSearcher2.close();

        //--------------------------------------------------------------------
        // scenario 3
        //--------------------------------------------------------------------

        // deleting the document just added; this will cause a different exception to occur
        Term term = new Term("id", "doc1");
        IndexReader readerB = IndexReader.open(indexStoreB, false);
        readerB.deleteDocuments(term);
        readerB.close();

        // optimizing the index with the writer
        writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
        writerB.optimize();
        writerB.close();

        // building the searchables
        Searcher[] searchers3 = new Searcher[2];

        searchers3[0] = new IndexSearcher(indexStoreB, true);
        searchers3[1] = new IndexSearcher(indexStoreA, true);
        // creating the multiSearcher
        Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
        // performing the same search
        ScoreDoc[] hits3 = mSearcher3.search(query, null, 1000).scoreDocs;

        assertEquals(3, hits3.length);

        // iterating over the hit documents
        for (int i = 0; i < hits3.length; i++) {
          mSearcher3.doc(hits3[i].doc);
        }
        mSearcher3.close();
        indexStoreA.close();
        indexStoreB.close();
    }

public class TestIndexWriterMergePolicy extends LuceneTestCase {

  // Test the normal case
  public void testNormalCase() throws IOException {
    Directory dir = new RAMDirectory();

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setMaxBufferedDocs(10);
    writer.setMergeFactor(10);
    writer.setMergePolicy(new LogDocMergePolicy(writer));


  // Test that the writer does not merge more than necessary
  public void testNoOverMerge() throws IOException {
    Directory dir = new RAMDirectory();

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setMaxBufferedDocs(10);
    writer.setMergeFactor(10);
    writer.setMergePolicy(new LogDocMergePolicy(writer));
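The two merge-policy snippets above use the 2.x-era setters on IndexWriter itself, which were deprecated once IndexWriterConfig arrived. A rough 3.x equivalent, as a sketch (the analyzer and Version constant are placeholders; on this API, LogDocMergePolicy takes no constructor argument):

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

Directory dir = new RAMDirectory();
LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
mergePolicy.setMergeFactor(10);      // merge whenever 10 segments pile up at one level
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT,
        new WhitespaceAnalyzer(Version.LUCENE_CURRENT))
    .setMaxBufferedDocs(10)          // flush a new segment every 10 buffered docs
    .setMergePolicy(mergePolicy);
IndexWriter writer = new IndexWriter(dir, conf);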
