Package org.apache.lucene.store

Examples of org.apache.lucene.store.Directory
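The snippets below are drawn from Lucene 3.x test code and exercise a Directory through IndexWriter and IndexReader. As a minimal, self-contained sketch of the same pattern (assuming Lucene 3.x, Version.LUCENE_35; RAMDirectory can be swapped for FSDirectory.open(new File(...)) for on-disk storage):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class DirectoryExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();  // in-memory; use FSDirectory.open(file) for disk
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
    Document doc = new Document();
    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);  // true = read-only
    System.out.println("numDocs=" + reader.numDocs());
    reader.close();
    dir.close();
  }
}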



  // test that batched delete terms are flushed together
  public void testBatchDeletes() throws IOException {
    Directory dir = newDirectory();
    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
        .setMaxBufferedDeleteTerms(2));

    int id = 0;
    int value = 100;

    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();
     
    id = 0;
    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));

    modifier.commit();

    reader = IndexReader.open(dir, true);
    assertEquals(5, reader.numDocs());
    reader.close();

    Term[] terms = new Term[3];
    for (int i = 0; i < terms.length; i++) {
      terms[i] = new Term("id", String.valueOf(++id));
    }
    modifier.deleteDocuments(terms);
    modifier.commit();
    reader = IndexReader.open(dir, true);
    assertEquals(2, reader.numDocs());
    reader.close();

    modifier.close();
    dir.close();
  }
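Deletes buffered by deleteDocuments become visible only after a flush or commit; setMaxBufferedDeleteTerms(2) above forces a flush every two buffered terms. When a document is replaced rather than just removed, updateDocument performs the delete and the add atomically. A sketch reusing the test's "modifier" writer (the field flags here are an assumption about what the test's addDoc helper does):

    Document replacement = new Document();
    replacement.add(new Field("id", "7", Field.Store.YES, Field.Index.NOT_ANALYZED));
    modifier.updateDocument(new Term("id", "7"), replacement);  // atomic delete + add
    modifier.commit();  // deletes and adds become visible together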



  // test deleteAll()
  public void testDeleteAll() throws IOException {
    Directory dir = newDirectory();
    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
        .setMaxBufferedDeleteTerms(2));

    int id = 0;
    int value = 100;

    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();

    // Add 1 doc (so we will have something buffered)
    addDoc(modifier, 99, value);

    // Delete all
    modifier.deleteAll();

    // The deleteAll shouldn't be visible on disk yet
    reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();

    // Add a doc and update a doc (after the deleteAll, before the commit)
    addDoc(modifier, 101, value);
    updateDoc(modifier, 102, value);

    // commit the delete all
    modifier.commit();

    // Validate that only the two docs added/updated after the deleteAll remain
    reader = IndexReader.open(dir, true);
    assertEquals(2, reader.numDocs());
    reader.close();

    modifier.close();
    dir.close();
  }
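deleteAll removes every document, including the still-buffered doc 99, but nothing changes on disk until the commit; the two documents added and updated after the deleteAll survive it, hence the final count of 2. The pattern in isolation (a sketch, assuming an existing IndexWriter named writer):

    writer.deleteAll();  // drops committed segments and any RAM-buffered docs
    // documents added after the deleteAll survive it...
    writer.commit();     // ...and only now do newly opened readers see the wipe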


  // test rollback of deleteAll()
  public void testDeleteAllRollback() throws IOException {
    Directory dir = newDirectory();
    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
        .setMaxBufferedDeleteTerms(2));
   
    int id = 0;
    int value = 100;
   
    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();
   
    addDoc(modifier, ++id, value);

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();
   
    // Delete all
    modifier.deleteAll();

    // Roll it back
    modifier.rollback();
    modifier.close();
   
    // Validate that the docs are still there
    reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();
   
    dir.close();
  }



  // test deleteAll() w/ near real-time reader
  public void testDeleteAllNRT() throws IOException {
    Directory dir = newDirectory();
    IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
        .setMaxBufferedDeleteTerms(2));
   
    int id = 0;
    int value = 100;
   
    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();

    IndexReader reader = modifier.getReader();
    assertEquals(7, reader.numDocs());
    reader.close();

    addDoc(modifier, ++id, value);
    addDoc(modifier, ++id, value);
   
    // Delete all
    modifier.deleteAll();

    reader = modifier.getReader();
    assertEquals(0, reader.numDocs());
    reader.close();
   

    // Roll it back
    modifier.rollback();
    modifier.close();
   
    // Validate that the docs are still there
    reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();
   
    dir.close();
  }
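getReader() returns a near-real-time reader that sees the uncommitted deleteAll without any commit. The usual refresh idiom for such readers, as a sketch assuming a writer named w (IndexReader.reopen is the Lucene 3.x refresh API):

    IndexReader nrt = w.getReader();       // sees uncommitted adds and deletes
    IndexReader refreshed = nrt.reopen();  // cheap refresh; same instance if unchanged
    if (refreshed != nrt) {
      nrt.close();  // close the stale reader only when a new one was returned
      nrt = refreshed;
    }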

 
  public void testDeleteAllSlowly() throws Exception {
    final Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    final int NUM_DOCS = atLeast(1000);
    final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS);
    for(int id=0;id<NUM_DOCS;id++) {
      ids.add(id);
    }
    Collections.shuffle(ids, random);
    for(int id : ids) {
      Document doc = new Document();
      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
      w.addDocument(doc);
    }
    Collections.shuffle(ids, random);
    int upto = 0;
    while(upto < ids.size()) {
      final int left = ids.size() - upto;
      final int inc = Math.min(left, _TestUtil.nextInt(random, 1, 20));
      final int limit = upto + inc;
      while(upto < limit) {
        w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
      }
      final IndexReader r = w.getReader();
      assertEquals(NUM_DOCS - upto, r.numDocs());
      r.close();
    }

    w.close();
    dir.close();
  }

 
  public void testIndexingThenDeleting() throws Exception {
    final Random r = random;
    Directory dir = newDirectory();
    // note this test explicitly disables payloads
    final Analyzer analyzer = new Analyzer() {
      @Override
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
      }
    };
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
        .setRAMBufferSizeMB(1.0)
        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
    w.setInfoStream(VERBOSE ? System.out : null);
    Document doc = new Document();
    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
    int num = atLeast(3);
    for (int iter = 0; iter < num; iter++) {
      int count = 0;

      final boolean doIndexing = r.nextBoolean();
      if (VERBOSE) {
        System.out.println("TEST: iter doIndexing=" + doIndexing);
      }
      if (doIndexing) {
        // Add docs until a flush is triggered
        final int startFlushCount = w.getFlushCount();
        while(w.getFlushCount() == startFlushCount) {
          w.addDocument(doc);
          count++;
        }
      } else {
        // Delete docs until a flush is triggered
        final int startFlushCount = w.getFlushCount();
        while(w.getFlushCount() == startFlushCount) {
          w.deleteDocuments(new Term("foo", ""+count));
          count++;
        }
      }
      assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
    }
    w.close();
    dir.close();
  }
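The configuration above disables both count-based flush triggers, so only the 1 MB RAM buffer decides when a flush happens; the assertion then verifies that thousands of docs (or delete terms) fit in that buffer before a flush fires. The relevant settings in isolation:

    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
        .setRAMBufferSizeMB(1.0)                                   // flush on RAM usage only
        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)  // no doc-count trigger
        .setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);  // no delete-count trigger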

  /*
   * Test "by time expiration" deletion policy.
   */
  public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException {

    final double SECONDS = 2.0;

    Directory dir = newDirectory();
    ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setIndexDeletionPolicy(policy);
    MergePolicy mp = conf.getMergePolicy();
    if (mp instanceof LogMergePolicy) {
      setUseCompoundFile(mp, true);
    }
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.close();

    final int ITER = 9;

    long lastDeleteTime = 0;
    for(int i=0;i<ITER;i++) {
      // Record last time when writer performed deletes of
      // past commits
      lastDeleteTime = System.currentTimeMillis();
      conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(
          OpenMode.APPEND).setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, true);
      }
      writer = new IndexWriter(dir, conf);
      for(int j=0;j<17;j++) {
        addDoc(writer);
      }
      writer.close();

      if (i < ITER-1) {
        // Make sure to sleep long enough so that some commit
        // points will be deleted:
        Thread.sleep((int) (1000.0*(SECONDS/5.0)));
      }
    }

    // First, make sure the policy in fact deleted something:
    assertTrue("no commits were deleted", policy.numDelete > 0);

    // Then a simplistic check: verify that the segments_N's
    // that still exist are within SECONDS seconds of the last
    // delete time, and that a reader can be opened on each:
    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
   
    String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                            "",
                                                            gen);
    dir.deleteFile(IndexFileNames.SEGMENTS_GEN);

    boolean oneSecondResolution = true;

    while(gen > 0) {
      try {
        IndexReader reader = IndexReader.open(dir, true);
        reader.close();
        fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                         "",
                                                         gen);

        // if we are on a filesystem that seems to have only
        // 1 second resolution, allow +1 second in commit
        // age tolerance:
        long modTime = dir.fileModified(fileName);
        oneSecondResolution &= (modTime % 1000) == 0;
        final long leeway = (long) ((SECONDS + (oneSecondResolution ? 1.0:0.0))*1000);

        assertTrue("commit point was older than " + SECONDS + " seconds (" + (lastDeleteTime - modTime) + " msec) but did not get deleted ", lastDeleteTime - modTime <= leeway);
      } catch (IOException e) {
        // OK
        break;
      }
     
      dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
      gen--;
    }

    dir.close();
  }
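ExpirationTimeDeletionPolicy is defined elsewhere in this test file; what follows is a minimal sketch of such a policy, reconstructed from how the test uses it (the constructor arguments and the numDelete counter), not the test's exact code. It keeps the newest commit and deletes any older commit whose segments_N file has aged past the expiration window (uses java.util.List plus org.apache.lucene.index.IndexCommit and IndexDeletionPolicy):

class ExpirationTimeDeletionPolicy implements IndexDeletionPolicy {
  private final Directory dir;
  private final double expirationTimeSeconds;
  int numDelete;  // counted so the test can assert commits were deleted

  ExpirationTimeDeletionPolicy(Directory dir, double seconds) {
    this.dir = dir;
    this.expirationTimeSeconds = seconds;
  }

  public void onInit(List<? extends IndexCommit> commits) throws IOException {
    onCommit(commits);
  }

  public void onCommit(List<? extends IndexCommit> commits) throws IOException {
    // Never delete the newest commit; expire older ones by segments_N mod time:
    IndexCommit last = commits.get(commits.size() - 1);
    long expireTime = dir.fileModified(last.getSegmentsFileName())
        - (long) (1000.0 * expirationTimeSeconds);
    for (IndexCommit commit : commits) {
      if (commit != last
          && dir.fileModified(commit.getSegmentsFileName()) < expireTime) {
        commit.delete();
        numDelete++;
      }
    }
  }
}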

  // test a deletion policy that keeps every commit; runs two passes,
  // without and then with compound files (the method header and pass
  // loop below are reconstructed context for this fragment)
  public void testKeepAllDeletionPolicy() throws IOException {
    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      // Never deletes a commit
      KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();

      Directory dir = newDirectory();
      policy.dir = dir;

      IndexWriterConfig conf = newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)
          .setMergeScheduler(new SerialMergeScheduler());
      MergePolicy mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, useCompoundFile);
      }
      IndexWriter writer = new IndexWriter(dir, conf);
      for(int i=0;i<107;i++) {
        addDoc(writer);
      }
      writer.close();

      final boolean isOptimized;
      {
        IndexReader r = IndexReader.open(dir);
        isOptimized = r.isOptimized();
        r.close();
      }
      if (!isOptimized) {
        conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
            new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
            .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
        mp = conf.getMergePolicy();
        if (mp instanceof LogMergePolicy) {
          setUseCompoundFile(mp, true);
        }
        if (VERBOSE) {
          System.out.println("TEST: open writer for optimize");
        }
        writer = new IndexWriter(dir, conf);
        writer.setInfoStream(VERBOSE ? System.out : null);
        writer.optimize();
        writer.close();
      }
      assertEquals(isOptimized ? 0:1, policy.numOnInit);

      // There should be exactly one commit per writer close
      // above (so 2 if the optimize pass ran):
      assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit);

      // Test listCommits
      Collection<IndexCommit> commits = IndexReader.listCommits(dir);
      // one commit per writer close above:
      assertEquals(1 + (isOptimized ? 0:1), commits.size());

      // Make sure we can open a reader on each commit:
      for (final IndexCommit commit : commits) {
        IndexReader r = IndexReader.open(commit, null, false);
        r.close();
      }

      // Simplistic check: verify all segments_N's still
      // exist, and that a reader can be opened on each:
      dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
      while(gen > 0) {
        IndexReader reader = IndexReader.open(dir, true);
        reader.close();
        dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        gen--;

        if (gen > 0) {
          // Removing a commit point should have orphaned at
          // least one index file. Open & close a writer and
          // assert that it actually removed something:
          int preCount = dir.listAll().length;
          writer = new IndexWriter(dir, newIndexWriterConfig(
              TEST_VERSION_CURRENT,
              new MockAnalyzer(random)).setOpenMode(
              OpenMode.APPEND).setIndexDeletionPolicy(policy));
          writer.close();
          int postCount = dir.listAll().length;
          assertTrue(postCount < preCount);
        }
      }

      dir.close();
    }
  }
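KeepAllDeletionPolicy is also defined elsewhere in the test file; from its use above it needs onInit/onCommit counters and a dir field, and it must never delete a commit. A minimal sketch consistent with that usage (an assumption, not the test's exact class):

class KeepAllDeletionPolicy implements IndexDeletionPolicy {
  int numOnInit;
  int numOnCommit;
  Directory dir;

  public void onInit(List<? extends IndexCommit> commits) {
    numOnInit++;
  }

  public void onCommit(List<? extends IndexCommit> commits) {
    numOnCommit++;  // deliberately never calls commit.delete()
  }
}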

  /* Uses KeepAllDeletionPolicy to keep all commits around,
   * then opens a new IndexWriter on a previous commit point. */
  public void testOpenPriorSnapshot() throws IOException {
    // Never deletes a commit
    KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();

    Directory dir = newDirectory();
    policy.dir = dir;

    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setIndexDeletionPolicy(policy).
            setMaxBufferedDocs(2).
            setMergePolicy(newLogMergePolicy(10))
    );
    for(int i=0;i<10;i++) {
      addDoc(writer);
      if ((1+i)%2 == 0)
        writer.commit();
    }
    writer.close();

    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
    assertEquals(5, commits.size());
    IndexCommit lastCommit = null;
    for (final IndexCommit commit : commits) {
      if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
        lastCommit = commit;
    }
    assertNotNull(lastCommit);

    // Now add 1 doc and optimize
    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
    addDoc(writer);
    assertEquals(11, writer.numDocs());
    writer.optimize();
    writer.close();

    assertEquals(6, IndexReader.listCommits(dir).size());

    // Now open writer on the commit just before optimize:
    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
    assertEquals(10, writer.numDocs());

    // Rolling back undoes our switch to the prior commit:
    writer.rollback();

    IndexReader r = IndexReader.open(dir, true);
    // Still optimized, still 11 docs
    assertTrue(r.isOptimized());
    assertEquals(11, r.numDocs());
    r.close();

    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
    assertEquals(10, writer.numDocs());
    // Closing the writer commits the switch back to the prior commit:
    writer.close();

    // Now 7 because closing the writer above added one more commit
    assertEquals(7, IndexReader.listCommits(dir).size());
   
    r = IndexReader.open(dir, true);
    // Not optimized because we rolled it back, and now only
    // 10 docs
    assertTrue(!r.isOptimized());
    assertEquals(10, r.numDocs());
    r.close();

    // Reoptimize
    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
    writer.optimize();
    writer.close();

    r = IndexReader.open(dir, true);
    assertTrue(r.isOptimized());
    assertEquals(10, r.numDocs());
    r.close();

    // Now open writer on the commit just before optimize,
    // but this time keeping only the last commit:
    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit));
    assertEquals(10, writer.numDocs());
   
    // Reader still sees optimized index, because writer
    // opened on the prior commit has not yet committed:
    r = IndexReader.open(dir, true);
    assertTrue(r.isOptimized());
    assertEquals(10, r.numDocs());
    r.close();

    writer.close();

    // Now reader sees unoptimized index:
    r = IndexReader.open(dir, true);
    assertTrue(!r.isOptimized());
    assertEquals(10, r.numDocs());
    r.close();

    dir.close();
  }
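The core move in this test — reopening a writer on an older IndexCommit and closing it to make the rollback durable — only works while a deletion policy has kept that commit alive. Condensed into a sketch using the same test helpers (newIndexWriterConfig, MockAnalyzer, KeepAllDeletionPolicy):

    // Find the oldest commit still on disk:
    IndexCommit target = null;
    for (IndexCommit c : IndexReader.listCommits(dir)) {
      if (target == null || c.getGeneration() < target.getGeneration()) {
        target = c;
      }
    }
    // Open a writer on that commit; closing the writer commits the rollback:
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setIndexDeletionPolicy(new KeepAllDeletionPolicy())
        .setIndexCommit(target));
    w.close();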

  // test a deletion policy that deletes all prior commits when the
  // writer starts; runs two passes, without and then with compound
  // files (the method header and pass loop below are reconstructed
  // context for this fragment)
  public void testKeepNoneOnInitDeletionPolicy() throws IOException {
    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();

      Directory dir = newDirectory();

      IndexWriterConfig conf = newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
          .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, useCompoundFile);
      }
      IndexWriter writer = new IndexWriter(dir, conf);
      for(int i=0;i<107;i++) {
        addDoc(writer);
      }
      writer.close();

      conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        setUseCompoundFile(mp, true);
      }
      writer = new IndexWriter(dir, conf);
      writer.optimize();
      writer.close();

      assertEquals(1, policy.numOnInit);
      // There should be exactly 2 commits
      // (one per writer close above):
      assertEquals(2, policy.numOnCommit);

      // Simplistic check: just verify the index is in fact
      // readable:
      IndexReader reader = IndexReader.open(dir, true);
      reader.close();

      dir.close();
    }
  }
