int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(), (int)SSTableReader.getApproximateKeyCount(sstables));
if (logger.isDebugEnabled())
    logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);
SSTableWriter writer;
CompactionIterator ci = new CompactionIterator(cfs, sstables, gcBefore, major); // retain a handle so we can call close()
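// nni skips rows the merge removed entirely; CompactionIterator yields null for those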
Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
executor.beginCompaction(cfs.columnFamily, ci);
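// keys that were in the key cache of any input sstable, mapped to their position in the new sstable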
Map<DecoratedKey, Long> cachedKeys = new HashMap<DecoratedKey, Long>();
try
{
    if (!nni.hasNext())
    {
        // don't mark compacted in the finally block, since if there _is_ nondeleted data,
        // we need to sync it (via closeAndOpen) first, so there is no period during which
        // a crash could cause data loss.
        cfs.markCompacted(sstables);
        return 0;
    }

    // merge the input sstables into a new sstable written at a temporary path
    String newFilename = new File(cfs.getTempSSTablePath(compactionFileLocation)).getAbsolutePath();
    writer = new SSTableWriter(newFilename, expectedBloomFilterSize, cfs.metadata, cfs.partitioner);
    while (nni.hasNext())
    {
        AbstractCompactedRow row = nni.next();
        long position = writer.append(row);
        totalkeysWritten++;

        // if the row's key was cached for any input sstable, remember its offset in the new file
        for (SSTableReader sstable : sstables)
        {
            if (sstable.getCachedPosition(row.key) != null)
            {
                cachedKeys.put(row.key, position);
                break;
            }
        }
    }
}
finally
{
    ci.close();
}
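// sync the new sstable to disk, open a reader on it, and swap it in for the compacted inputs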
SSTableReader ssTable = writer.closeAndOpenReader(getMaxDataAge(sstables));
cfs.replaceCompactedSSTables(sstables, Arrays.asList(ssTable));
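// pre-populate the new sstable's key cache with the keys that were hot in the old ones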
for (Entry<DecoratedKey, Long> entry : cachedKeys.entrySet())