final int numBlocks = STRIPE_LENGTH + 1;
final int repl = 1;
setup(10, -1);
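// Create the (fake) distributed block-integrity monitor and grab its
// decommissioning worker, which drives the block regeneration below.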
DistBlockIntegrityMonitor br = new DistBlockRegeneratorFake(conf);
Worker bc = br.getDecommissioningMonitor();
// Generate a random test file and raid it with the RS codec
Path raidPath = new Path("/raidrs");
Path filePath = new Path("/user/hadoop/testReconstruction/file");
long[] crcs = createRandomFile(filePath, repl, numBlocks);
FileStatus file = fileSys.getFileStatus(filePath);
RaidNode.doRaid(conf, file, raidPath, Codec.getCodec("rs"),
                new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
                false, repl, repl);
// Print the file's block locations before decommissioning anything
printFileLocations(file);
// Mark the file as "decommissioning" via the test hook
TestBlockCopier.decommissioningFiles =
    new String[] { filePath.toUri().toString() };
// "Decommission" each of the file's blocks in turn
List<LocatedBlock> fileBlocks =
    dfs.getNameNode().getBlockLocations(filePath.toUri().toString(),
                                        0L,
                                        file.getLen()).getLocatedBlocks();
for (LocatedBlock b : fileBlocks) {
  TestBlockCopier.decommissioningBlocks = new LocatedBlock[] { b };
  bc.checkAndReconstructBlocks();

  // Wait up to 30 seconds for the spawned block regeneration jobs to finish.
  long start = System.currentTimeMillis();
  while ((br.jobsRunning() > 0)
         && ((System.currentTimeMillis() - start) < 30000)) {
    LOG.info("Waiting on block regen jobs to complete ("
             + br.jobsRunning() + " running).");
    Thread.sleep(1000);
    bc.checkJobs();
  }
}
// Verify that each block now has an extra replica.
printFileLocations(file);
fileBlocks =
    dfs.getNameNode().getBlockLocations(filePath.toUri().toString(),
                                        0L,
                                        file.getLen()).getLocatedBlocks();
for (LocatedBlock b : fileBlocks) {
  assertEquals("block was improperly replicated",
               repl + 1, b.getLocations().length);
}
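// Check the monitor's counters: no copy failures, and one copy operation
// per decommissioned block.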
bc.updateStatus();
assertEquals("unexpected copy failures occurred",
             0, br.getNumFileCopyFailures());
assertEquals("unexpected number of file copy operations",
             numBlocks, br.getNumFilesCopied());