@Test
public void testBalancerBasicScenario() throws IOException {
  Path balancerTempDir = null;
  try {
    List<DNClient> testnodes = reserveDatanodesForTest(2);
    DNClient testnode1 = testnodes.get(0);
    DNClient testnode2 = testnodes.get(1);
    shutdownNonTestNodes(testnodes);

    LOG.info("attempting to kill both test nodes");
    stopDatanode(testnode1);
    stopDatanode(testnode2);

    LOG.info("starting up datanode [" + testnode1.getHostName()
        + "] and loading it with data");
    startDatanode(testnode1);

    // mkdir balancer-temp
    balancerTempDir = makeTempDir();

    // write 2 blocks to file system
    LOG.info("generating filesystem load");
    // TODO spec blocks to generate by blockCount, blockSize, # of writers
    generateFileSystemLoad(2); // generate 2 blocks of test data

    LOG.info("measure space used on 1st node");
    long usedSpace0 = getDatanodeUsedSpace(testnode1);
    LOG.info("datanode " + testnode1.getHostName()
        + " contains " + usedSpace0 + " bytes");

    LOG.info("bring up a 2nd node and run balancer on DFS");
    startDatanode(testnode2);
    runBalancerAndVerify(testnodes);
  } catch (Throwable t) {
    LOG.error("testBalancerBasicScenario failed", t);
    // rethrow so the failure is reported instead of being silently swallowed
    throw new AssertionError(t);
  } finally {
    // finally block to run cleanup
    LOG.info("clean off test data from DFS [rmr ~/balancer-temp]");
    try {
      deleteTempDir(balancerTempDir);
    } catch (Exception e) {
      LOG.warn("problem cleaning up temp dir", e);
    }
    // restart every datanode in the cluster, including the non-test nodes shut down earlier
    for (DNClient dn : dfsCluster.getDNClients()) {
      startDatanode(dn);
    }
  }
}
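
/*
 * Illustrative sketch only: plausible implementations of the makeTempDir() and
 * deleteTempDir() helpers used above, written against the standard Hadoop
 * FileSystem API. The "balancer-temp" directory name under the user's home
 * directory (matching the "rmr ~/balancer-temp" log message), the use of a
 * fresh Configuration, and the imports for FileSystem/Configuration are
 * assumptions for this example, not the actual test implementation.
 */
private Path makeTempDirSketch() throws IOException {
  FileSystem fs = FileSystem.get(new Configuration());
  Path tempDir = new Path(fs.getHomeDirectory(), "balancer-temp");
  fs.mkdirs(tempDir);
  return tempDir;
}

private void deleteTempDirSketch(Path tempDir) throws IOException {
  if (tempDir == null) {
    return; // nothing was created, nothing to clean up
  }
  FileSystem fs = FileSystem.get(new Configuration());
  fs.delete(tempDir, true); // recursive delete, like "hadoop fs -rmr"
}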
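
/*
 * Illustrative sketch only: one plausible shape for the runBalancerAndVerify()
 * helper invoked above. The runBalancer() call, the 10% tolerance, and the use
 * of JUnit's Assert are assumptions made for this example; verification reuses
 * the getDatanodeUsedSpace(DNClient) helper already called in the test.
 */
private void runBalancerAndVerifySketch(List<DNClient> testnodes) throws Exception {
  // run the balancer against the cluster (hypothetical helper)
  runBalancer();

  // compute the mean space used across the datanodes under test
  long totalUsed = 0;
  for (DNClient dn : testnodes) {
    totalUsed += getDatanodeUsedSpace(dn);
  }
  long meanUsed = totalUsed / testnodes.size();

  // after balancing, each datanode should be within 10% of the mean
  for (DNClient dn : testnodes) {
    long used = getDatanodeUsedSpace(dn);
    Assert.assertTrue("datanode " + dn.getHostName() + " is out of balance: used="
        + used + ", mean=" + meanUsed,
        Math.abs(used - meanUsed) <= meanUsed / 10);
  }
}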