/**
 * Verifies access control for killing jobs:
 * <ol>
 *   <li>A random user (not the submitter, not in the administer group) must be
 *       denied when trying to kill a job in queue p1:p11, while the submitter
 *       u1 can kill it.</li>
 *   <li>The JobTracker itself can kill a job in queue p1:p12.</li>
 *   <li>User u3 (not in the administer list) must be denied on a fresh
 *       p1:p11 job, while u1 can still kill it.</li>
 * </ol>
 *
 * @throws Exception if cluster setup, submission, or kill plumbing fails
 */
@Test
public void testAccessToKillJob() throws Exception {
  Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1"
      + NAME_SEPARATOR + "p11", conf);
  final JobConf jobConf = miniMRCluster.createJobConf();
  Cluster cluster = null;
  JobID jobID = job.getStatus().getJobID();
  // Ensure the JobInProgress is initialized before we issue a kill
  // signal to the job.
  JobTracker tracker = miniMRCluster.getJobTrackerRunner().getJobTracker();
  JobInProgress jip = tracker.getJob(org.apache.hadoop.mapred.JobID
      .downgrade(jobID));
  tracker.initJob(jip);
  try {
    // Neither the submitter nor a queue administrator: the kill must fail.
    cluster = createClusterAs("someRandomUser", "someRandomGroup",
        new Configuration(miniMRCluster.createJobConf()));
    cluster.getJob(jobID).killJob();
    fail("user 'someRandomUser' is neither u1 nor in the administer group list");
  } catch (Exception expected) {
    // Access was denied as intended; the submitter u1 can kill the job.
    cluster = createClusterAs("u1", "g1",
        new Configuration(miniMRCluster.createJobConf()));
    cluster.getJob(jobID).killJob();
    // Kill the running job and verify the terminal state.
    assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
        State.KILLED, cluster.getJob(jobID).getStatus().getState());
  }
  job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1" + NAME_SEPARATOR
      + "p12", conf);
  jobID = job.getStatus().getJobID();
  // Ensure the JobInProgress is initialized before we issue a kill
  // signal to the job.
  jip = tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
  tracker.initJob(jip);
  tracker.killJob(job.getJobID());
  // The JobTracker killed the job directly; verify the terminal state.
  assertEquals("job submitted for u1 and queue p1:p12 is not killed.",
      State.KILLED, cluster.getJob(jobID).getStatus().getState());
  cluster = createClusterAs("u1", "g1",
      new Configuration(miniMRCluster.createJobConf()));
  job = submitSleepJob(1, 1, 10, 10, false, "u1,g1", "p1" + NAME_SEPARATOR
      + "p11", conf);
  jobID = job.getStatus().getJobID();
  // Ensure the JobInProgress is initialized before we issue a kill
  // signal to the job.
  jip = tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
  tracker.initJob(jip);
  cluster = createClusterAs("u3", "g3", jobConf);
  // Try killing the job as a user who is not in the administer list.
  try {
    cluster.getJob(jobID).killJob();
    fail("u3 not in administer list");
  } catch (Exception expected) {
    // Access was denied as intended; u1 (the submitter) kills the job.
    cluster = createClusterAs("u1", "g1", jobConf);
    assertFalse(cluster.getJob(jobID).isComplete());
    cluster.getJob(jobID).killJob();
    // Kill the running job and verify the terminal state.
    assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
        State.KILLED, cluster.getJob(jobID).getStatus().getState());
  }
}

/**
 * Creates a {@link Cluster} client as the given test user/group so that
 * subsequent job operations are authorized (or denied) as that user.
 *
 * @param user  test user name to impersonate
 * @param group single group the test user belongs to
 * @param clusterConf configuration used to connect to the mini cluster
 * @return a Cluster handle owned by the impersonated user
 * @throws Exception if the privileged action fails
 */
private Cluster createClusterAs(String user, String group,
    final Configuration clusterConf) throws Exception {
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(user, new String[] { group });
  return ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
    public Cluster run() throws IOException {
      return new Cluster(clusterConf);
    }
  });
}