final int nameNodePort = cluster.getNameNodePort();
// Get a reference to its DFS directly
FileSystem fs = cluster.getFileSystem();
assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs = (DistributedFileSystem) fs;
// Get another reference via network using a specific socket factory
Configuration cconf = new Configuration();
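// Deliberately point the client at nameNodePort + 10: the DummySocketFactory
// configured below is expected to reroute connections back to the real port,
// so this client can only reach the NameNode if the custom factory is used.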
FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%d/",
    nameNodePort + 10));
cconf.set("hadoop.rpc.socket.factory.class.default",
"org.apache.hadoop.ipc.DummySocketFactory");
cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
"org.apache.hadoop.ipc.DummySocketFactory");
cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
"org.apache.hadoop.ipc.DummySocketFactory");
fs = FileSystem.get(cconf);
assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs = (DistributedFileSystem) fs;
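// Declared outside the try block so it can be closed in the finally clause.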
JobClient client = null;
try {
  // This will test RPC to the NameNode only.
  // Could we also test Client-to-DataNode connections?
  Path filePath = new Path("/dir");
  assertFalse(directDfs.exists(filePath));
  assertFalse(dfs.exists(filePath));
  directDfs.mkdirs(filePath);
  assertTrue(directDfs.exists(filePath));
  assertTrue(dfs.exists(filePath));
  // This will test RPC to a JobTracker.
  MiniMRCluster mr = new MiniMRCluster(1, fs.getUri().toString(), 1);
  final int jobTrackerPort = mr.getJobTrackerPort();
  JobConf jconf = new JobConf(cconf);
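  // Same trick as for the NameNode: the JobTracker address is deliberately
  // off by 10 so that only a connection routed through the custom socket
  // factory can reach the real port.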
jconf.set("mapred.job.tracker", String.format("localhost:%d",
jobTrackerPort + 10));
client = new JobClient(jconf);
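  // jobsToComplete() issues an RPC to the JobTracker; a successful (empty)
  // result shows the JobSubmissionProtocol connection went through.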
  JobStatus[] jobs = client.jobsToComplete();
  assertEquals(0, jobs.length);
} finally {
  try {
    if (client != null)
      client.close();
  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
  try {
    if (dfs != null)
      dfs.close();
  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }