Package org.apache.lucene.index

Examples of org.apache.lucene.index.LogByteSizeMergePolicy
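LogByteSizeMergePolicy is a LogMergePolicy subclass that selects segments to merge by their total size in bytes rather than by document count. The examples below all follow the same pattern: create the policy, tune a few setters, and install it on an IndexWriterConfig before opening the IndexWriter. As a minimal, self-contained sketch of that pattern (the class name, index path, analyzer choice and tuning values are illustrative assumptions, not taken from any one example below):

import java.io.File;
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class LogByteSizeMergePolicyExample {          // hypothetical class name
  public static void main(String[] args) throws IOException {
    // Size-based log merge policy: segments are bucketed into levels by byte size.
    LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
    mergePolicy.setMergeFactor(10);    // segments merged at once per level (10 is the Lucene default)
    mergePolicy.setMaxMergeMB(512.0);  // segments above this size are skipped by normal (non-forced) merges
    mergePolicy.setMinMergeMB(1.6);    // segments below this size all count as the lowest level

    IndexWriterConfig config =
        new IndexWriterConfig(Version.LUCENE_48, new StandardAnalyzer(Version.LUCENE_48));
    config.setMergePolicy(mergePolicy);

    Directory directory = FSDirectory.open(new File("/tmp/example-index")); // hypothetical path
    IndexWriter writer = new IndexWriter(directory, config);
    try {
      // ... add or update documents here ...
    } finally {
      writer.close();
    }
  }
}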


      Version matchVersion) throws CoreException {
    logger.info("Opening index writer for '" + log.getName() + "'..."); //$NON-NLS-1$ //$NON-NLS-2$
    IndexWriter writer = null;
    try {
      Directory dir = FSDirectory.open(IndexPlugin.getDefault().getIndexFile(log));
      LogMergePolicy mp = new LogByteSizeMergePolicy();
      mp.setMergeFactor(30);
      IndexWriterConfig cfg = new IndexWriterConfig(matchVersion, analyzer);
      cfg.setMaxBufferedDocs(1000);
      cfg.setMergePolicy(mp);
      writer = new IndexWriter(dir, cfg);
      try {
View Full Code Here


    // Make sure we use a MergePolicy which always merges adjacent segments and thus
    // keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
    return new IndexWriterConfig(Version.LUCENE_43,
        null).setOpenMode(openMode).setMergePolicy(
        new LogByteSizeMergePolicy());
  }
View Full Code Here

   * we need to override the MergeScheduler to handle background errors, and a new instance needs to be created for each
   * new IndexWriter.
   * Also each new IndexWriter needs a new MergePolicy.
   */
  private IndexWriter createNewIndexWriter() throws IOException {
    LogByteSizeMergePolicy newMergePolicy = indexParameters.getNewMergePolicy(); //TODO make it possible to configure a different policy?
    writerConfig.setMergePolicy( newMergePolicy );
    MergeScheduler mergeScheduler = new ConcurrentMergeScheduler( this.errorHandler, this.indexName );
    writerConfig.setMergeScheduler( mergeScheduler );
    return new IndexWriter( directoryProvider.getDirectory(), writerConfig );
  }
View Full Code Here
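
The Javadoc above states a rule that also holds with plain Lucene classes: every new IndexWriter needs its own MergePolicy, and the MergeScheduler (and the IndexWriterConfig itself) should likewise not be shared between writers. A minimal sketch of that rule, using the same imports as the first sketch plus org.apache.lucene.index.ConcurrentMergeScheduler (the index paths are illustrative):

// One merge policy and one scheduler per writer; these instances are never shared.
IndexWriterConfig cfgA = new IndexWriterConfig(Version.LUCENE_48,
        new StandardAnalyzer(Version.LUCENE_48))
    .setMergePolicy(new LogByteSizeMergePolicy())
    .setMergeScheduler(new ConcurrentMergeScheduler());
IndexWriter writerA = new IndexWriter(FSDirectory.open(new File("/tmp/index-a")), cfgA);

// The second writer gets fresh instances of the config, the policy and the scheduler.
IndexWriterConfig cfgB = new IndexWriterConfig(Version.LUCENE_48,
        new StandardAnalyzer(Version.LUCENE_48))
    .setMergePolicy(new LogByteSizeMergePolicy())
    .setMergeScheduler(new ConcurrentMergeScheduler());
IndexWriter writerB = new IndexWriter(FSDirectory.open(new File("/tmp/index-b")), cfgB);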

      File dir = new File(dataDir + "/" + name);
      NIOFSDirectory directory = new NIOFSDirectory(dir);

      IndexWriter writer;
      IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_40, analyzer);
      LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
      mergePolicy.setMaxMergeMB(50.0);
      mergePolicy.setUseCompoundFile(true);
      mergePolicy.setMergeFactor(8);
      writerConfig.setMergePolicy(mergePolicy);
      writerConfig.setRAMBufferSizeMB(128.0);
      writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

      writer = new IndexWriter(directory, writerConfig);
View Full Code Here

    // :Post-Release-Update-Version.LUCENE_XY:
    // Make sure we use a MergePolicy which always merges adjacent segments and thus
    // keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
    return new IndexWriterConfig(Version.LUCENE_48,
        null).setOpenMode(openMode).setMergePolicy(
        new LogByteSizeMergePolicy());
  }
View Full Code Here
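
The method above is DirectoryTaxonomyWriter's protected createIndexWriterConfig(OpenMode) hook. If an application overrides it to tune the taxonomy writer, the comment's constraint still applies: the config must keep a merge policy that only merges adjacent segments, such as LogByteSizeMergePolicy. A hedged sketch of such an override (the subclass name and RAM-buffer value are illustrative assumptions):

import java.io.IOException;

import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

// Hypothetical subclass: tunes the taxonomy writer's config but keeps a log merge
// policy, because a policy that merges non-adjacent segments (e.g. TieredMergePolicy)
// would reorder doc IDs and break the taxonomy index's ordinal-to-docID assumption.
public class TunedTaxonomyWriter extends DirectoryTaxonomyWriter {

  public TunedTaxonomyWriter(Directory directory) throws IOException {
    super(directory);
  }

  @Override
  protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, null)
        .setOpenMode(openMode)
        .setMergePolicy(new LogByteSizeMergePolicy()); // adjacent-only merges
    config.setRAMBufferSizeMB(64.0);                   // illustrative tuning value
    return config;
  }
}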

   
    // hold onto IW to forceMerge
    // note how we don't close it, since DTW will close it.
    final IndexWriter iw = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(new LogByteSizeMergePolicy()));
    DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir) {
      @Override
      protected IndexWriter openIndexWriter(Directory directory,
          IndexWriterConfig config) throws IOException {
        return iw;
View Full Code Here

   * we need to override the MergeScheduler to handle background errors, and a new instance needs to be created for each
   * new IndexWriter.
   * Also each new IndexWriter needs a new MergePolicy.
   */
  private IndexWriter createNewIndexWriter() throws IOException {
    LogByteSizeMergePolicy newMergePolicy = indexParameters.getNewMergePolicy(); //TODO make it possible to configure a different policy?
    writerConfig.setMergePolicy( newMergePolicy );
    MergeScheduler mergeScheduler = new ConcurrentMergeScheduler( this.errorHandler );
    writerConfig.setMergeScheduler( mergeScheduler );
    IndexWriter writer = new IndexWriter( directoryProvider.getDirectory(), writerConfig );
    return writer;
View Full Code Here

  public static AlcoholicMergePolicy newAlcoholicMergePolicy(Random r, TimeZone tz) {
    return new AlcoholicMergePolicy(tz, new Random(r.nextLong()));
  }

  public static LogMergePolicy newLogMergePolicy(Random r) {
    LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
    logmp.setCalibrateSizeByDeletes(r.nextBoolean());
    if (rarely(r)) {
      logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
    } else {
      logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
View Full Code Here
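
newLogMergePolicy(Random) is one of LuceneTestCase's randomized factory helpers: it picks either LogDocMergePolicy or LogByteSizeMergePolicy and randomizes its merge factor and calibrateSizeByDeletes setting. In a test it plugs into IndexWriterConfig the same way as the hand-built policies above; a minimal sketch, assuming the code runs inside a LuceneTestCase subclass (MockAnalyzer comes from Lucene's test framework, as in the earlier snippets):

// LuceneTestCase helpers: random(), newDirectory(), TEST_VERSION_CURRENT, newLogMergePolicy(...)
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
    .setMergePolicy(newLogMergePolicy(random()));
IndexWriter writer = new IndexWriter(dir, iwc);
// ... index some documents ...
writer.close();
dir.close();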
