final int repCount = 3;
// We use the region server's file system & conf, as we expect them to have the hook.
conf = targetRs.getConfiguration();
HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
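// HFileSystem is HBase's wrapper around the underlying file system; the hook mentioned
// above is expected to reorder the block locations the namenode returns for WAL files.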
Table h = htu.createTable("table".getBytes(), sb);
// Now, we have 4 datanodes and a replication count of 3, so we don't know whether the
// datanode co-located with the region server will receive one of the replicas. We can't
// really stop an existing datanode, as that would expose us to nasty HDFS bugs/issues,
// so we're going to try multiple times instead.

// Now we need to find the log file, get its block locations, and look at them.
String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
    "/" + targetRs.getServerName().toString()).toUri().getPath();
DistributedFileSystem mdfs = (DistributedFileSystem)
    hbm.getMaster().getMasterFileSystem().getFileSystem();
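// The master's view of the same HDFS instance, cast to DistributedFileSystem so the
// HDFS-specific client API is available.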
int nbTest = 0;
while (nbTest < 10) {
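  // Rolling the WAL writer closes the current log file and opens a new one, so each
  // attempt gets a fresh file (and hence a fresh block placement) to inspect.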
  htu.getHBaseAdmin().rollHLogWriter(targetRs.getServerName().toString());
  // We need a sleep as the namenode is informed asynchronously
  Thread.sleep(100);

  // insert one put to ensure a minimal size
  Put p = new Put(sb);
  p.add(sb, sb, sb);
  h.put(p);

  DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
  HdfsFileStatus[] hfs = dl.getPartialListing();
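  // The partial listing contains the WAL files currently in the server's log directory;
  // passing HdfsFileStatus.EMPTY_NAME means the listing starts from the beginning.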
  // As we wrote a put, we should have at least one log file.