// NOTE(review): this is a fragment of a larger compaction method — the enclosing
// try block (opened below) and the while loop at the bottom both continue past
// this excerpt, so cleanup/close logic is not visible here.
if (logger.isDebugEnabled())
logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);
SSTableWriter writer;
CompactionIterator ci = new CompactionIterator(sstables, gcBefore, major); // retain a handle so we can call close()
// Wrap in a FilterIterator that drops nulls — presumably CompactedRows whose
// contents were entirely GC'd/tombstoned by the merge; confirm in CompactionIterator.
Iterator<CompactionIterator.CompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
// Register this compaction with the executor before iterating
// (NOTE(review): presumably for progress tracking/bookkeeping — verify in executor).
executor.beginCompaction(cfs, ci);
try
{
// Nothing survived the merge: mark the inputs compacted and bail out early.
if (!nni.hasNext())
{
// don't mark compacted in the finally block, since if there _is_ nondeleted data,
// we need to sync it (via closeAndOpen) first, so there is no period during which
// a crash could cause data loss.
cfs.markCompacted(sstables);
return 0;
}
// Open a writer on a new temp sstable file in the chosen compaction location,
// sized for the expected bloom filter.
String newFilename = new File(compactionFileLocation, cfs.getTempSSTableFileName()).getAbsolutePath();
writer = new SSTableWriter(newFilename, expectedBloomFilterSize, StorageService.getPartitioner());
// validate the CF as we iterate over it
AntiEntropyService.IValidator validator = AntiEntropyService.instance.getValidator(table.name, cfs.getColumnFamilyName(), null, major);
validator.prepare();
// Stream each merged row into the new sstable, feeding the validator as we go.
while (nni.hasNext())
{
CompactionIterator.CompactedRow row = nni.next();
// File position before the append — NOTE(review): consumed later in the loop
// (continuation not visible here); presumably for per-row size/progress accounting.
long prevpos = writer.getFilePointer();
writer.append(row.key, row.buffer);
validator.add(row);
totalkeysWritten++;