job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
int returnVal = 0;
RunningJob rj = null;
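// if no Hadoop job name has been configured, derive one from the query string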
boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
    HiveConf.ConfVars.HADOOPJOBNAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
  int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
  jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(),
      maxlen - 6);
}
if (noName) {
  // This is for a special case to ensure unit tests pass
  HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
      jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
// pass aggregation key to mapper
HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX,
    work.getAggKey());
try {
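  // register the input paths for this work and serialize the plan
  // into the MR scratch directory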
  addInputPaths(job, work);
  MapredWork mrWork = new MapredWork();
  mrWork.setMapWork(work);
  Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpFileURI());

  // remove the metastore password from the conf so that it does not
  // show up in the job tracker logs
  String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
  if (pwd != null) {
    HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
  }
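  // create the JobClient and ship any jars added to the session with the job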
  JobClient jc = new JobClient(job);
  String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
  if (!addedJars.isEmpty()) {
    job.set("tmpjars", addedJars);
  }

  // make this client wait if the job tracker is not behaving well.
  Throttle.checkJobTracker(job, LOG);
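  // if this job gathers statistics, initialize the configured stats publisher before submitting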
  if (work.isGatheringStats()) {
    // initialize the stats publishing table
    StatsPublisher statsPublisher;
    String statsImplementationClass = HiveConf.getVar(job, HiveConf.ConfVars.HIVESTATSDBCLASS);
    if (StatsFactory.setImplementation(statsImplementationClass, job)) {
      statsPublisher = StatsFactory.getStatsPublisher();
      if (!statsPublisher.init(job)) { // creates the stats table if it does not exist
        if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
          throw new HiveException(
              ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
        }
      }
    }
  }
  // Finally SUBMIT the JOB!
  rj = jc.submitJob(job);
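  // monitor the job until completion; a non-zero return value indicates failure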
  returnVal = jobExecHelper.progress(rj, jc);
  success = (returnVal == 0);
} catch (Exception e) {
  e.printStackTrace();
  String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
  if (rj != null) {
    mesg = "Ended Job = " + rj.getJobID() + mesg;
  } else {
    mesg = "Job Submission failed" + mesg;
  }

  // Has to use full name to make sure it does not conflict with
  // org.apache.commons.lang.StringUtils
  console.printError(mesg, "\n"
      + org.apache.hadoop.util.StringUtils.stringifyException(e));

  success = false;
  returnVal = 1;
} finally {
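  // best-effort cleanup: clear the temporary context, kill the job on failure and record its ID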
  try {
    if (ctxCreated) {
      ctx.clear();
    }
    if (rj != null) {
      if (returnVal != 0) {
        rj.killJob();
      }
      HadoopJobExecHelper.runningJobKillURIs.remove(rj.getJobID());
      jobID = rj.getID().toString();
    }
  } catch (Exception e) {
    // cleanup failures are deliberately ignored; the task result is already set
  }
}