public void close(TaskAttemptContext context) throws IOException {
LOG.debug("Task " + context.getTaskAttemptID() + " merging into dstDir: " + workDir + ", srcDirs: " + shards);
writeShardNumberFile(context);
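// Start background progress heartbeats so the long-running merge doesn't trip the MapReduce task timeout.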
heartBeater.needHeartBeat();
try {
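// The destination index is written straight to HDFS; HdfsDirectory adapts an HDFS path to Lucene's Directory API.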
Directory mergedIndex = new HdfsDirectory(workDir, context.getConfiguration());
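// A null Analyzer is acceptable here: this writer only merges existing indexes and never analyzes documents.
// Note that Version.LUCENE_CURRENT is deprecated; pinning a concrete Version constant is safer across Lucene upgrades.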
IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_CURRENT, null)
.setOpenMode(OpenMode.CREATE).setUseCompoundFile(false)
//.setMergePolicy(mergePolicy) // TODO: grab tuned MergePolicy from solrconfig.xml?
//.setMergeScheduler(...) // TODO: grab tuned MergeScheduler from solrconfig.xml?
;
if (LOG.isDebugEnabled()) {
writerConfig.setInfoStream(System.out);
}
// writerConfig.setRAMBufferSizeMB(100); // improve performance
// writerConfig.setMaxThreadStates(1);
// disable compound file to improve performance
// also see http://lucene.472066.n3.nabble.com/Questions-on-compound-file-format-td489105.html
// also see defaults in SolrIndexConfig
MergePolicy mergePolicy = writerConfig.getMergePolicy();
LOG.debug("mergePolicy was: {}", mergePolicy);
if (mergePolicy instanceof TieredMergePolicy) {
((TieredMergePolicy) mergePolicy).setNoCFSRatio(0.0);
// ((TieredMergePolicy) mergePolicy).setMaxMergeAtOnceExplicit(10000);
// ((TieredMergePolicy) mergePolicy).setMaxMergeAtOnce(10000);
// ((TieredMergePolicy) mergePolicy).setSegmentsPerTier(10000);
} else if (mergePolicy instanceof LogMergePolicy) {
((LogMergePolicy) mergePolicy).setNoCFSRatio(0.0);
}
LOG.info("Using mergePolicy: {}", mergePolicy);
IndexWriter writer = new IndexWriter(mergedIndex, writerConfig);
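// Open every source shard as an HdfsDirectory so the shards can be handed to the writer's addIndexes() merge below.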
Directory[] indexes = new Directory[shards.size()];
for (int i = 0; i < shards.size(); i++) {
indexes[i] = new HdfsDirectory(shards.get(i), context.getConfiguration());
}
context.setStatus("Logically merging " + shards.size() + " shards into one shard");
LOG.info("Logically merging " + shards.size() + " shards into one shard: " + workDir);
long start = System.currentTimeMillis();
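// Time the logical merge; addIndexes() copies segment files as-is, deferring any physical merge to a later forceMerge.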