Package org.apache.lucene.index

Examples of org.apache.lucene.index.TieredMergePolicy$SegmentByteSizeDescending
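TieredMergePolicy picks merges by first sorting the index's segments by byte size, largest first; SegmentByteSizeDescending is the private comparator inside TieredMergePolicy that performs that sort, so the examples on this page exercise it indirectly whenever they configure the enclosing policy. For reference, below is a minimal sketch of installing a TieredMergePolicy on an IndexWriterConfig, assuming the same Lucene 4.x APIs the snippets use; the specific values are illustrative only, not recommendations.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class TieredMergePolicyExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));

    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setMaxMergeAtOnce(10);        // most segments merged at once during normal merging
    tmp.setSegmentsPerTier(10.0);     // segments allowed per tier before a merge is selected
    tmp.setMaxMergedSegmentMB(512.0); // soft cap on the size of a merged segment
    tmp.setUseCompoundFile(false);    // write separate files rather than a .cfs compound file
    conf.setMergePolicy(tmp);

    IndexWriter writer = new IndexWriter(dir, conf);
    // ... add documents here ...
    writer.close();
    dir.close();
  }
}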


    }
    return logmp;
  }

  public static TieredMergePolicy newTieredMergePolicy(Random r) {
    TieredMergePolicy tmp = new TieredMergePolicy();
    if (r.nextInt(3) == 2) { // one time in three, force the minimum merge width of 2
      tmp.setMaxMergeAtOnce(2);
      tmp.setMaxMergeAtOnceExplicit(2);
    } else {
      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 20));
      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 30));
    }
    tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
    tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
    tmp.setExpungeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
    tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20));
    tmp.setUseCompoundFile(r.nextBoolean());
    tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
    return tmp;
  }


 
  public void testTieredMergePolicyConfig() throws Exception {
    IndexWriterConfig iwc = solrConfig.defaultIndexConfig.toIndexWriterConfig(h.getCore().getSchema());
    MergePolicy mp = iwc.getMergePolicy();
    assertTrue(mp instanceof TieredMergePolicy);
    TieredMergePolicy tieredMP = (TieredMergePolicy) mp;

    // mp-specific setter
    assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());
   
    // make sure we apply compoundFile and mergeFactor
    assertEquals(false, tieredMP.getUseCompoundFile());
    assertEquals(7, tieredMP.getMaxMergeAtOnce());
   
    // make sure we overrode segmentsPerTier (split from maxMergeAtOnce out of mergeFactor)
    assertEquals(9D, tieredMP.getSegmentsPerTier(), 0.001);
  }
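The comments in this test hint at how Solr maps its legacy mergeFactor onto TieredMergePolicy: the single factor feeds both maxMergeAtOnce and segmentsPerTier unless one of them is explicitly overridden (here segmentsPerTier is overridden to 9 while maxMergeAtOnce keeps the mergeFactor of 7). A rough sketch of that mapping follows; applyMergeFactor is a hypothetical helper for illustration, not Solr's actual API.

  // Hypothetical helper sketching how a legacy mergeFactor can drive both
  // TieredMergePolicy knobs; Solr's real wiring lives in its index config code.
  static void applyMergeFactor(TieredMergePolicy tmp, int mergeFactor) {
    tmp.setMaxMergeAtOnce(mergeFactor);  // e.g. 7 in the test above
    tmp.setSegmentsPerTier(mergeFactor); // unless overridden, e.g. to 9 above
  }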

    return DirectoryReader.open(directory);
  }

  private void populate(Directory directory) throws IOException {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    IndexWriter writer = new IndexWriter(directory, conf);
    addDocs(writer);
    writer.close();
  }
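The populate method above calls an addDocs helper that this excerpt omits. Purely as a hypothetical stand-in (using Document and StringField from org.apache.lucene.document in the same Lucene 4.3 API), it might look like this:

  // Hypothetical stand-in for the omitted addDocs helper; the real project's
  // implementation is not shown in this excerpt.
  private void addDocs(IndexWriter writer) throws IOException {
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
      writer.addDocument(doc);
    }
  }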

    Analyzer analyzer = _fieldManager.getAnalyzerForIndex();

    _conf = new IndexWriterConfig(LuceneVersionConstant.LUCENE_VERSION, analyzer);
    _conf.setCodec(new Blur022Codec());
    _conf.setSimilarity(tableContext.getSimilarity());
    TieredMergePolicy mergePolicy = (TieredMergePolicy) _conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);

    _overFlowConf = _conf.clone();

    if (_indexLocally) {
      String localDirPath = System.getProperty(JAVA_IO_TMPDIR);

    _mainDirectory = new HdfsDirectory(configuration, _shardPath);
    _fieldManager = tableContext.getFieldManager();
    Analyzer analyzerForIndex = _fieldManager.getAnalyzerForIndex();
    IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, analyzerForIndex);
    // conf.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    _commitWriter = new IndexWriter(commitDirectory, conf.clone());

    // Make sure there's an empty index...
    new IndexWriter(_mainDirectory, conf.clone()).close();
    _mainWriter = new IndexWriter(_mainDirectory, conf.clone());

    _conf = new IndexWriterConfig(LUCENE_VERSION, analyzer);
    _conf.setWriteLockTimeout(TimeUnit.MINUTES.toMillis(5));
    _conf.setCodec(new Blur022Codec(_tableContext.getBlurConfiguration()));
    _conf.setSimilarity(_tableContext.getSimilarity());
    _conf.setMergedSegmentWarmer(new BlurIndexReaderWarmer(shardContext, _isClosed, indexWarmup));
    TieredMergePolicy mergePolicy = (TieredMergePolicy) _conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    _conf.setMergeScheduler(mergeScheduler.getMergeScheduler());
    _snapshotIndexDeletionPolicy = new SnapshotIndexDeletionPolicy(_tableContext.getConfiguration(), new Path(
        shardContext.getHdfsDirPath(), "generations"));
    _policy = new IndexDeletionPolicyReader(_snapshotIndexDeletionPolicy);
    _conf.setIndexDeletionPolicy(_policy);

  private static void createShard(Configuration configuration, int i, Path path, int totalShardCount)
      throws IOException {
    HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);

    Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
    int partition = partitioner.getPartition(new IntWritable(i), null, totalShardCount);
    assertEquals(i, partition);

    }
    return logmp;
  }

  public static TieredMergePolicy newTieredMergePolicy(Random r) {
    TieredMergePolicy tmp = new TieredMergePolicy();
    if (rarely(r)) { // occasionally use very small merge widths (2 to 4 segments)
      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
    } else {
      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 5, 50));
      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 5, 50));
    }
    tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
    tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
    tmp.setExpungeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
    tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20));
    tmp.setUseCompoundFile(r.nextBoolean());
    tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
    tmp.setReclaimDeletesWeight(r.nextDouble()*4);
    return tmp;
  }
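Factories like the one above are typically plugged straight into an IndexWriterConfig inside a test. A minimal usage sketch, assuming Lucene's test-framework MockAnalyzer and an arbitrary seed:

  // Sketch of using the randomized factory above in a test; the seed and
  // analyzer choice are placeholders, not part of the original code.
  Random random = new Random(42L);
  Directory dir = new RAMDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_43, new MockAnalyzer(random));
  iwc.setMergePolicy(newTieredMergePolicy(random));
  IndexWriter writer = new IndexWriter(dir, iwc);
  // ... index a few documents ...
  writer.close();
  dir.close();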