TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat);
FetchWork fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
resultTab, qb.getParseInfo().getOuterQueryLimit());
FetchTask fetchTask = (FetchTask) TaskFactory.get(fetch, conf);
setFetchTask(fetchTask);
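// The FetchTask serves query results directly from the result files, so no
// additional map-reduce job is needed to return rows to the client.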
// For the FetchTask, the limit optimization requires fetching all the
// rows into memory and counting how many rows we get. That is not
// practical if the limit factor is too big.
int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit
+ ". Doesn't qualify limit optimiztion.");
globalLimitCtx.disableOpt();
}
} else if (!isCStats) {
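// Each LoadTableDesc becomes a MoveTask that relocates the written data
// into its final table (or partition) location.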
for (LoadTableDesc ltd : loadTableWork) {
Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
conf);
mvTask.add(tsk);
// Check whether this load makes any indexes stale and, if configured, auto-update them
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, getInputs(), conf);
try {
List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater.generateUpdateTasks();
for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
tsk.addDependentTask(updateTask);
}
} catch (HiveException e) {
console.printInfo("WARNING: could not auto-update stale indexes, indexes are not in of sync");
}
}
}
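// CTAS writes through a single load file; redirect that file's move task
// to the target table location computed below.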
boolean oneLoadFile = true;
for (LoadFileDesc lfd : loadFileWork) {
if (qb.isCTAS()) {
assert (oneLoadFile); // CTAS should not have more than one load file
// make the MoveTask's destination directory the table's destination
String location = qb.getTableDesc().getLocation();
if (location == null) {
// get the table's default location
Table dumpTable;
Path targetPath;
try {
dumpTable = db.newTable(qb.getTableDesc().getTableName());
if (!db.databaseExists(dumpTable.getDbName())) {
throw new SemanticException("ERROR: The database " + dumpTable.getDbName()
+ " does not exist.");
}
Warehouse wh = new Warehouse(conf);
targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable
.getTableName());
} catch (HiveException e) {
throw new SemanticException(e);
} catch (MetaException e) {
throw new SemanticException(e);
}
location = targetPath.toString();
}
lfd.setTargetDir(location);
oneLoadFile = false;
}
mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false),
conf));
}
}
// generate map reduce plans
ParseContext tempParseContext = getParseContext();
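// The GenMRProcContext threads shared state through the tree walk: the
// operator-to-task mapping, the list of operators already seen, the move
// tasks generated above, and the root tasks of the resulting plan.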
GenMRProcContext procCtx = new GenMRProcContext(
conf,
new HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>>(),
new ArrayList<Operator<? extends OperatorDesc>>(), tempParseContext,
mvTask, rootTasks,
new LinkedHashMap<Operator<? extends OperatorDesc>, GenMapRedCtx>(),
inputs, outputs);
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack.
// The dispatcher generates the plan from the operator tree
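// Each rule key is a regular expression over the names of the operators
// on the walker's stack; for example, "TS%.*RS%" fires when a reduce
// sink appears somewhere below a table scan.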
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
opRules.put(new RuleRegExp(new String("R1"),
TableScanOperator.getOperatorName() + "%"),
new GenMRTableScan1());
opRules.put(new RuleRegExp(new String("R2"),
TableScanOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
new GenMRRedSink1());
opRules.put(new RuleRegExp(new String("R3"),
ReduceSinkOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
new GenMRRedSink2());
opRules.put(new RuleRegExp(new String("R4"),
FileSinkOperator.getOperatorName() + "%"),
new GenMRFileSink1());
opRules.put(new RuleRegExp(new String("R5"),
UnionOperator.getOperatorName() + "%"),
new GenMRUnion1());
opRules.put(new RuleRegExp(new String("R6"),
UnionOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
new GenMRRedSink3());
opRules.put(new RuleRegExp(new String("R6"),
MapJoinOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
new GenMRRedSink4());
opRules.put(new RuleRegExp(new String("R7"),
TableScanOperator.getOperatorName() + "%.*" + MapJoinOperator.getOperatorName() + "%"),
MapJoinFactory.getTableScanMapJoin());
opRules.put(new RuleRegExp(new String("R8"),
ReduceSinkOperator.getOperatorName() + "%.*" + MapJoinOperator.getOperatorName() + "%"),
MapJoinFactory.getReduceSinkMapJoin());
opRules.put(new RuleRegExp(new String("R9"),
UnionOperator.getOperatorName() + "%.*" + MapJoinOperator.getOperatorName() + "%"),
MapJoinFactory.getUnionMapJoin());
opRules.put(new RuleRegExp(new String("R10"),
MapJoinOperator.getOperatorName() + "%.*" + MapJoinOperator.getOperatorName() + "%"),
MapJoinFactory.getMapJoinMapJoin());
opRules.put(new RuleRegExp(new String("R11"),
MapJoinOperator.getOperatorName() + "%" + SelectOperator.getOperatorName() + "%"),
MapJoinFactory.getMapJoin());
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules,
procCtx);
GraphWalker ogw = new GenMapRedWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(topOps.values());
ogw.startWalking(topNodes, null);
/* If the query is the result of an "analyze table ... compute statistics
 * for columns" rewrite, create a column stats task instead of a fetch
 * task to persist the stats to the metastore.
 */
if (isCStats) {
genColumnStatsTask(qb);
}
// A reduce sink no longer has any children: the plan has by now been
// broken up into multiple tasks. Iterate over all tasks, and for each
// task, go over all of its operators recursively.
for (Task<? extends Serializable> rootTask : rootTasks) {
breakTaskTree(rootTask);
}
// For each task, set the key descriptor for the reducer
for (Task<? extends Serializable> rootTask : rootTasks) {
setKeyDescTaskTree(rootTask);
}
// If a task contains an operator that requires BucketizedHiveInputFormat,
// set that input format on the task.
for (Task<? extends Serializable> rootTask : rootTasks) {
setInputFormat(rootTask);
}
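// Run the physical optimizer over the completed task tree; depending on
// configuration this applies resolvers such as map-join conversion and
// skew-join handling.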
PhysicalContext physicalContext = new PhysicalContext(conf,
getParseContext(), ctx, rootTasks, fetchTask);
PhysicalOptimizer physicalOptimizer = new PhysicalOptimizer(
physicalContext, conf);
physicalOptimizer.optimize();
// For each operator, generate the counters if needed
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS)) {
for (Task<? extends Serializable> rootTask : rootTasks) {
generateCountersTask(rootTask);
}
}
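// Decide how the plan should execute, e.g. whether it is small enough to
// qualify for local mode rather than a full cluster job.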
decideExecMode(rootTasks, ctx, globalLimitCtx);
if (qb.isCTAS()) {
// generate a DDL task and make it a dependent task of the leaf tasks
CreateTableDesc crtTblDesc = qb.getTableDesc();
crtTblDesc.validate();
// Clear the outputs for CTAS: we do not need the outputs of the
// MapredWork; the DDLWork at the tail of the chain will carry them.
getOutputs().clear();
Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(
getInputs(), getOutputs(), crtTblDesc), conf);
// find all leaf tasks and make the DDLTask a dependent task of each of
// them
HashSet<Task<? extends Serializable>> leaves = new HashSet<Task<? extends Serializable>>();
getLeafTasks(rootTasks, leaves);
assert (leaves.size() > 0);
for (Task<? extends Serializable> task : leaves) {
if (task instanceof StatsTask) {
// A StatsTask requires the table to already exist, so splice the
// create-table task between the StatsTask and its current parents.
for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()){
parentOfStatsTask.addDependentTask(crtTblTask);
}
for (Task<? extends Serializable> parentOfCrtTblTask : crtTblTask.getParentTasks()){
parentOfCrtTblTask.removeDependentTask(task);
}
crtTblTask.addDependentTask(task);
} else {
task.addDependentTask(crtTblTask);
}
}
}
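// When the limit optimization is on, only part of the input was scanned,
// so have the FetchTask verify that it still produces at least the
// requested number of rows.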
if (globalLimitCtx.isEnable() && fetchTask != null) {
int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
fetchTask.getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
}
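// Apply the same least-row check to the last reduce-side LimitDesc.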
if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());