double growthFactor = 4;
int maxSleepTime = 1000 * Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME;
boolean reportedProblem = false;
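// retry loop: keep attempting the minor compaction; on success the resulting data file info
// is returned from inside the loop, otherwise a problem is reported and the attempt is retried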
do {
  FileSKVWriter mfw = null;
  try {
    long t1 = System.currentTimeMillis();

    AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(),
        extent.getTableId().toString());

    FileOperations fileOps = FileOperations.getInstance();
    mfw = fileOps.openWriter(dirname, fs, conf, tableConf);
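    // suppress entries covered by delete markers in the in-memory map; the second argument (true)
    // propagates the delete markers themselves into the output file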
    DeletingIterator sourceItr = new DeletingIterator(new ColumnFamilySkippingIterator(map.skvIterator()), true);
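    // set up the iterator environment and stack any iterators configured for the minor compaction (minc) scope on top of the source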
    TabletIteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.minc, tableConf);
    SortedKeyValueIterator<Key,Value> fai = IteratorUtil.loadIterators(IteratorScope.minc, sourceItr, extent, tableConf, iterEnv);
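    // seek to the beginning of the full key range, with no column family restrictions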
    fai.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);

    long entriesCompacted = 0;
    Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(tableConf);
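    // if the table defines locality groups and the output format supports them, partition the data into those groups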
    if (groups.size() > 0 && mfw.supportsLocalityGroups()) {
      entriesCompacted = partitionData(fai, groups, mfw);
    } else {
      // no locality groups, or locality groups not supported by the file format,
      // so just write everything to the default locality group
      mfw.startDefaultLocalityGroup();
      while (fai.hasTop()) {
        mfw.append(fai.getTopKey(), fai.getTopValue());
        fai.next();
        entriesCompacted++;
      }
    }
    mfw.close();

    // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
    try {
      FileSKVIterator openReader = fileOps.openReader(dirname, false, fs, conf, tableConf);
      openReader.close();
    } catch (IOException ex) {
      log.error("Verification of successful file write fails!!!", ex);
      throw ex;
    }
    long t2 = System.currentTimeMillis();
    log.debug(String.format("MinC %,d recs in | %,d recs out | %,d recs/sec | %6.3f secs | %,d bytes ", map.size(), entriesCompacted,
        (int) (map.size() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0, estimatedSizeInBytes()));
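    // this attempt succeeded, so clear any problem report filed by an earlier failed attempt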
    if (reportedProblem) {
      ProblemReports.getInstance().deleteProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname);
    }
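    // return the size of the newly written file along with the number of entries it contains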
    return new DataFileValue(fileOps.getFileSize(dirname, fs, conf,
        AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString())), entriesCompacted);
  } catch (IOException e) {
    log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
    ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
    reportedProblem = true;
  } catch (LocalityGroupConfigurationError e) {
    log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
    ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
    reportedProblem = true;
  } catch (RuntimeException e) {
    // if this is coming from a user iterator, it is possible that the user could change the
    // iterator config and that the minor compaction would then succeed
    log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...", e);
    ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
    reportedProblem = true;
  } finally {
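    // make sure the writer is closed, even if this attempt failed partway through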
    try {
      if (mfw != null)
        mfw.close();
    } catch (IOException e1) {
      log.error(e1, e1);
    }
  }