}
return this.callBackObj.checkFatalErrors(ctrs, errMsg);
}
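/**
 * Polls the Hadoop JobTracker until the running job completes, logging the
 * number of mappers/reducers once the job initializes and reporting
 * incremental progress along the way.
 */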
private MapRedStats progress(ExecDriverTaskHandle th) throws IOException {
JobClient jc = th.getJobClient();
RunningJob rj = th.getRunningJob();
String lastReport = "";
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
long reportTime = System.currentTimeMillis();
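// Upper bound on the time between progress log lines.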
long maxReportInterval =
HiveConf.getLongVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL);
boolean fatal = false;
StringBuilder errMsg = new StringBuilder();
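// How frequently to poll the JobTracker for job state and counters.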
long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
boolean initializing = true;
boolean initOutputPrinted = false;
long cpuMsec = -1;
int numMap = -1;
int numReduce = -1;
List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
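// Main polling loop: sleep, then check job state and emit progress.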
while (!rj.isComplete()) {
try {
Thread.sleep(pullInterval);
} catch (InterruptedException e) {
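// An interrupt during the poll sleep is ignored; the loop
// re-checks job completion on the next iteration.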
}
if (initializing && ShimLoader.getHadoopShims().isJobPreparing(rj)) {
// No reason to poll until the job is initialized
continue;
} else {
// The job has initialized by now, so there is no need to call
// rj.getJobState() again and incur an extra RPC.
initializing = false;
}
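// Log the mapper/reducer counts once, as soon as the task reports are available.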
if (!initOutputPrinted) {
SessionState ss = SessionState.get();
String logMapper;
String logReducer;
TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID());
if (mappers == null) {
logMapper = "no information about the number of mappers; ";
} else {
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(ss.getQueryId(), getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
}
logMapper = "number of mappers: " + numMap + "; ";
}
TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID());
if (reducers == null) {
logReducer = "no information about the number of reducers. ";
} else {
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(ss.getQueryId(), getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
}
logReducer = "number of reducers: " + numReduce;
}
console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
initOutputPrinted = true;
}
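// Re-fetch the job handle so we see a fresh snapshot of its state.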
RunningJob newRj = jc.getJob(rj.getJobID());
if (newRj == null) {
// Under exceptional load, Hadoop may be unable to look up the
// status of finished jobs because it has purged them from memory.
// From Hive's perspective this is equivalent to the job having
// failed, so raise a meaningful exception