String invalidReason = work.isInvalid();
if (invalidReason != null) {
throw new RuntimeException("Plan invalid, Reason: " + invalidReason);
}
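// Obtain the driver's Context (creating one if necessary) and set up an empty scratch
// directory on the job's file system; it is handed to addInputPaths() further below.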
Context ctx = driverContext.getCtx();
boolean ctxCreated = false;
String emptyScratchDirStr;
Path emptyScratchDir;
try {
if (ctx == null) {
ctx = new Context(job);
ctxCreated = true;
}
emptyScratchDirStr = ctx.getMRTmpFileURI();
emptyScratchDir = new Path(emptyScratchDirStr);
FileSystem fs = emptyScratchDir.getFileSystem(job);
fs.mkdirs(emptyScratchDir);
} catch (IOException e) {
e.printStackTrace();
console.printError("Error launching map-reduce job", "\n"
+ org.apache.hadoop.util.StringUtils.stringifyException(e));
return 5;
}
ShimLoader.getHadoopShims().prepareJobOutput(job);
// See the javadoc on HiveOutputFormatImpl and HadoopShims.prepareJobOutput()
job.setOutputFormat(HiveOutputFormatImpl.class);
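// Map-side setup: ExecMapper drives the map-side operator tree and emits
// HiveKey/BytesWritable pairs as intermediate output.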
job.setMapperClass(ExecMapper.class);
job.setMapOutputKeyClass(HiveKey.class);
job.setMapOutputValueClass(BytesWritable.class);
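// The partitioner is pluggable: resolve the class named by HiveConf.ConfVars.HIVEPARTITIONER.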
try {
job.setPartitionerClass((Class<? extends Partitioner>) (Class.forName(HiveConf.getVar(job,
HiveConf.ConfVars.HIVEPARTITIONER))));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
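// Copy map task count and split size hints from the plan into the job conf; hints that
// are unset in the plan leave the existing configuration untouched.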
if (work.getNumMapTasks() != null) {
job.setNumMapTasks(work.getNumMapTasks().intValue());
}
if (work.getMaxSplitSize() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, work.getMaxSplitSize().longValue());
}
if (work.getMinSplitSize() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
}
if (work.getMinSplitSizePerNode() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, work.getMinSplitSizePerNode().longValue());
}
if (work.getMinSplitSizePerRack() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, work.getMinSplitSizePerRack().longValue());
}
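// Reduce-side setup: the reducer count comes from the plan and ExecReducer drives the
// reduce-side operator tree.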
job.setNumReduceTasks(work.getNumReduceTasks().intValue());
job.setReducerClass(ExecReducer.class);
// set input format information if necessary
setInputAttributes(job);
// Propagate Hive's setting for speculative execution of reducers to the Hadoop job
boolean useSpeculativeExecReducers = HiveConf.getBoolVar(job,
HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS);
HiveConf.setBoolVar(job, HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS,
useSpeculativeExecReducers);
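// Pick the input format: honor HiveConf.ConfVars.HIVEINPUTFORMAT when set, otherwise fall
// back to the shim's default; plans flagged as bucketized force BucketizedHiveInputFormat.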
String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
if (StringUtils.isBlank(inpFormat)) {
inpFormat = ShimLoader.getHadoopShims().getInputFormatClassName();
}
if (getWork().isUseBucketizedHiveInputFormat()) {
inpFormat = BucketizedHiveInputFormat.class.getName();
}
LOG.info("Using " + inpFormat);
try {
job.setInputFormat((Class<? extends InputFormat>) (Class.forName(inpFormat)));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
// No-op: nothing is actually written to the job output here.
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Transfer HIVEAUXJARS and HIVEADDEDJARS to "tmpjars" so Hadoop understands it
String auxJars = HiveConf.getVar(job, HiveConf.ConfVars.HIVEAUXJARS);
String addedJars = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDJARS);
if (StringUtils.isNotBlank(auxJars) || StringUtils.isNotBlank(addedJars)) {
String allJars = StringUtils.isNotBlank(auxJars)
? (StringUtils.isNotBlank(addedJars) ? addedJars + "," + auxJars : auxJars)
: addedJars;
LOG.info("adding libjars: " + allJars);
initializeFiles("tmpjars", allJars);
}
// Transfer HIVEADDEDFILES to "tmpfiles" so hadoop understands it
String addedFiles = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDFILES);
if (StringUtils.isNotBlank(addedFiles)) {
initializeFiles("tmpfiles", addedFiles);
}
int returnVal = 0;
RunningJob rj = null;
boolean noName = StringUtils.isEmpty(HiveConf.getVar(job, HiveConf.ConfVars.HADOOPJOBNAME));
if (noName) {
// This is for a special case to ensure unit tests pass
HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB" + Utilities.randGen.nextInt());
}
String addedArchives = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDARCHIVES);
// Transfer HIVEADDEDARCHIVES to "tmparchives" so hadoop understands it
if (StringUtils.isNotBlank(addedArchives)) {
initializeFiles("tmparchives", addedArchives);
}
try {
MapredLocalWork localwork = work.getMapLocalWork();
if (localwork != null) {
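// The local work has produced hashtable dump files (e.g. for map joins) in a local temp
// directory. Unless the job runs in local mode, tar them, upload the archive to HDFS and
// register it in the distributed cache so the map tasks can fetch them.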
if (!ShimLoader.getHadoopShims().isLocalMode(job)) {
Path localPath = new Path(localwork.getTmpFileURI());
Path hdfsPath = new Path(work.getTmpHDFSFileURI());
FileSystem hdfs = hdfsPath.getFileSystem(job);
FileSystem localFS = localPath.getFileSystem(job);
FileStatus[] hashtableFiles = localFS.listStatus(localPath);
int fileNumber = hashtableFiles.length;
String[] fileNames = new String[fileNumber];
for (int i = 0; i < fileNumber; i++) {
fileNames[i] = hashtableFiles[i].getPath().getName();
}
// package and compress all the hashtable files into an archive file
String parentDir = localPath.toUri().getPath();
String stageId = this.getId();
String archiveFileURI = Utilities.generateTarURI(parentDir, stageId);
String archiveFileName = Utilities.generateTarFileName(stageId);
localwork.setStageID(stageId);
CompressionUtils.tar(parentDir, fileNames, archiveFileName);
Path archivePath = new Path(archiveFileURI);
LOG.info("Archive "+ hashtableFiles.length+" hash table files to " + archiveFileURI);
// upload the archive file to HDFS
String hdfsFile = Utilities.generateTarURI(hdfsPath, stageId);
Path hdfsFilePath = new Path(hdfsFile);
short replication = (short) job.getInt("mapred.submit.replication", 10);
hdfs.setReplication(hdfsFilePath, replication);
hdfs.copyFromLocalFile(archivePath, hdfsFilePath);
LOG.info("Upload 1 archive file from" + archivePath + " to: " + hdfsFilePath);
// add the archive file to the distributed cache
DistributedCache.createSymlink(job);
DistributedCache.addCacheArchive(hdfsFilePath.toUri(), job);
LOG.info("Add 1 archive file to distributed cache. Archive file: " + hdfsFilePath.toUri());
}
}
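// Register the plan's input paths (this is where the empty scratch dir created above is
// used) and attach the MapredWork plan to the job via Utilities.setMapRedWork.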
addInputPaths(job, work, emptyScratchDirStr, ctx);
Utilities.setMapRedWork(job, work, ctx.getMRTmpFileURI());
// remove the metastore password from the job conf so that the job tracker does not
// show it in its logs
String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
if (pwd != null) {
HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
}
JobClient jc = new JobClient(job);
// make this client wait if the job tracker is not behaving well.
Throttle.checkJobTracker(job, LOG);
if (work.isGatheringStats()) {
// initialize stats publishing table
StatsPublisher statsPublisher;
String statsImplementationClass = HiveConf.getVar(job, HiveConf.ConfVars.HIVESTATSDBCLASS);
if (StatsFactory.setImplementation(statsImplementationClass, job)) {
statsPublisher = StatsFactory.getStatsPublisher();
if (!statsPublisher.init(job)) { // creates the stats table if it does not exist
if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
}
}
}
}
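// Create any temporary output directories the plan needs before the job is submitted.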
this.createTmpDirs();
// Finally SUBMIT the JOB!
rj = jc.submitJob(job);
// restore the original metastore password
if (pwd != null) {
HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
}
returnVal = jobExecHelper.progress(rj, jc);
success = (returnVal == 0);
} catch (Exception e) {
e.printStackTrace();
String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
if (rj != null) {
mesg = "Ended Job = " + rj.getJobID() + mesg;
} else {
mesg = "Job Submission failed" + mesg;
}
// Has to use full name to make sure it does not conflict with
// org.apache.commons.lang.StringUtils
console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
success = false;
returnVal = 1;
} finally {
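// Cleanup: clear the plan from the job, drop the Context if it was created here, and
// kill the job if it did not complete successfully.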
Utilities.clearMapRedWork(job);
try {
if (ctxCreated) {
ctx.clear();
}
if (rj != null) {
if (returnVal != 0) {
rj.killJob();