Task<? extends Serializable> currTask = ctx.getCurrTask();
FileSinkOperator fsOp = (FileSinkOperator) nd;
boolean isInsertTable = // is INSERT OVERWRITE TABLE
    fsOp.getConf().getTableInfo().getTableName() != null &&
    parseCtx.getQB().getParseInfo().isInsertToTable();
HiveConf hconf = parseCtx.getConf();
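
// Everything below decides whether this file sink's output should be merged
// into larger files (signalled via chDir) before the final MoveTask runs.
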
// Has the user enabled merging of files for map-only jobs or for all jobs?
if ((ctx.getMvTask() != null) && (!ctx.getMvTask().isEmpty())) {
  List<Task<? extends Serializable>> mvTasks = ctx.getMvTask();

  // In case of unions or map-joins, this file sink may already have been
  // seen, so there is no need to attempt to merge its files again.
  if ((ctx.getSeenFileSinkOps() == null)
      || (!ctx.getSeenFileSinkOps().contains(nd))) {

    // No need to merge if the move is to the local file system
    // (checked below via mvTask.isLocal()).
    MoveTask mvTask = (MoveTask) findMoveTask(mvTasks, fsOp);
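    // findMoveTask pairs this file sink with the MoveTask that will relocate
    // its output; it can return null if no matching move task exists.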
    if (isInsertTable &&
        hconf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
      addStatsTask(fsOp, mvTask, currTask, parseCtx.getConf());
    }
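
    // addStatsTask (invoked above when hive.stats.autogather is enabled for an
    // INSERT) appends a StatsTask so that basic table/partition statistics are
    // refreshed once the inserted data has been moved into place.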

    if ((mvTask != null) && !mvTask.isLocal()) {
      // Separate configuration parameters control whether to merge the output
      // of a map-only job and of a map-reduce job.
      MapredWork currWork = (MapredWork) currTask.getWork();
      boolean mergeMapOnly =
          hconf.getBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES) &&
          currWork.getReducer() == null;
      boolean mergeMapRed =
          hconf.getBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES) &&
          currWork.getReducer() != null;
      if (mergeMapOnly || mergeMapRed) {
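        // chDir=true asks the downstream logic (outside this snippet) to
        // redirect this file sink to a temporary directory so a follow-up
        // merge job can consolidate the small output files before the move.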
        chDir = true;
      }
    }
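
    // For reference, the ConfVars consulted above correspond to these
    // user-facing settings (illustrative values; defaults differ across
    // Hive versions):
    //
    //   SET hive.merge.mapfiles=true;     -- merge small files from map-only jobs
    //   SET hive.merge.mapredfiles=true;  -- merge small files from map-reduce jobs
    //   SET hive.stats.autogather=true;   -- gather basic stats on INSERT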