    return null;
  }
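  /**
   * Generates the execution tasks for this query block: a plain SELECT *
   * over an unpartitioned table (or over confirmed partitions only) is
   * answered by a simple fetch with no map-reduce job, the global LIMIT
   * optimization is enabled when the query qualifies, and SELECT queries
   * get a fetch task for their final result.
   */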
  @SuppressWarnings("nls")
  private void genMapRedTasks(QB qb) throws SemanticException {
    FetchWork fetch = null;
    List<Task<? extends Serializable>> mvTask = new ArrayList<Task<? extends Serializable>>();
    FetchTask fetchTask = null;

    QBParseInfo qbParseInfo = qb.getParseInfo();

    // Does this query need a reduce job?
    if (qb.isSelectStarQuery() && qbParseInfo.getDestToClusterBy().isEmpty()
        && qbParseInfo.getDestToDistributeBy().isEmpty()
        && qbParseInfo.getDestToOrderBy().isEmpty()
        && qbParseInfo.getDestToSortBy().isEmpty()) {
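      // noMapRed is set to true when the whole query can be answered by a
      // simple fetch over the table or its confirmed partitions, so no
      // map-reduce job has to be launched.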
      boolean noMapRed = false;

      Iterator<Map.Entry<String, Table>> iter = qb.getMetaData()
          .getAliasToTable().entrySet().iterator();
      Table tab = (iter.next()).getValue();
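      // An unpartitioned table can be fetched directly only if there is no
      // WHERE clause; a partitioned table only if every predicate is on
      // partition columns and pruning leaves no unknown partitions.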
      if (!tab.isPartitioned()) {
        if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
          fetch = new FetchWork(tab.getPath().toString(), Utilities
              .getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit());
          noMapRed = true;
          inputs.add(new ReadEntity(tab));
        }
      } else {
        if (topOps.size() == 1) {
          TableScanOperator ts = (TableScanOperator) topOps.values().toArray()[0];

          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(topToTable.get(ts),
              opToPartPruner.get(ts))) {

            PrunedPartitionList partsList = null;
            try {
              partsList = opToPartList.get(ts);
              if (partsList == null) {
                partsList = PartitionPruner.prune(topToTable.get(ts),
                    opToPartPruner.get(ts), conf, (String) topOps.keySet()
                        .toArray()[0], prunedPartitions);
                opToPartList.put(ts, partsList);
              }
            } catch (HiveException e) {
              // Has to use the full name to make sure it does not conflict with
              // org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there are any unknown partitions, a map-reduce job is needed
            // so the filter can prune correctly
            if (partsList.getUnknownPartns().size() == 0) {
              List<String> listP = new ArrayList<String>();
              List<PartitionDesc> partP = new ArrayList<PartitionDesc>();

              Set<Partition> parts = partsList.getConfirmedPartns();
              Iterator<Partition> iterParts = parts.iterator();
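              // Record the location and partition descriptor of every
              // confirmed partition so the fetch can read them directly.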
              while (iterParts.hasNext()) {
                Partition part = iterParts.next();

                listP.add(part.getPartitionPath().toString());
                try {
                  partP.add(Utilities.getPartitionDesc(part));
                } catch (HiveException e) {
                  throw new SemanticException(e.getMessage(), e);
                }
                inputs.add(new ReadEntity(part));
              }

              fetch = new FetchWork(listP, partP, qb.getParseInfo()
                  .getOuterQueryLimit());
              noMapRed = true;
            }
          }
        }
      }
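      // If a fetch plan was built above, install it as the query's fetch task
      // and drop any map-reduce root tasks that were generated earlier.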
      if (noMapRed) {
        if (fetch.getTblDesc() != null) {
          PlanUtils.configureTableJobPropertiesForStorageHandler(
              fetch.getTblDesc());
        }
        fetchTask = (FetchTask) TaskFactory.get(fetch, conf);
        setFetchTask(fetchTask);

        // remove root tasks if any
        rootTasks.clear();
        return;
      }
    }

    // Determine whether the query qualifies for the optimization that reduces
    // the input size for LIMIT. The query only qualifies when there is exactly
    // one top operator and no TRANSFORM, UDTF, or block sampling is used.
    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVELIMITOPTENABLE)
        && ctx.getTryCount() == 0 && topOps.size() == 1
        && !globalLimitCtx.ifHasTransformOrUDTF()
        && nameToSplitSample.isEmpty()) {

      // Here we recursively check:
      // 1. whether there is exactly one LIMIT in the query
      // 2. whether there is no aggregation, group-by, distinct, sort by,
      //    distribute by, or table sampling in any of the sub-queries.
      // The query only qualifies if both conditions are satisfied.
      //
      // Example qualified queries:
      //   CREATE TABLE ... AS SELECT col1, col2 FROM tbl LIMIT ..
      //   INSERT OVERWRITE TABLE ... SELECT col1, hash(col2), split(col1)
      //     FROM ... LIMIT ...
      //   SELECT * FROM (SELECT col1 AS col2 FROM (SELECT * FROM ...) t1 LIMIT ...) t2
      //
      Integer tempGlobalLimit = checkQbpForGlobalLimit(qb);

      // the query qualifies for the optimization
      if (tempGlobalLimit != null && tempGlobalLimit != 0) {
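        // A usable global limit was found; reading only a subset of the input
        // is safe when an unpartitioned table has no WHERE clause, or when a
        // partitioned table is filtered on partition columns only and pruning
        // leaves no unknown partitions.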
        TableScanOperator ts = (TableScanOperator) topOps.values().toArray()[0];
        Table tab = topToTable.get(ts);

        if (!tab.isPartitioned()) {
          if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
            globalLimitCtx.enableOpt(tempGlobalLimit);
          }
        } else {
          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(tab,
              opToPartPruner.get(ts))) {

            PrunedPartitionList partsList = null;
            try {
              partsList = opToPartList.get(ts);
              if (partsList == null) {
                partsList = PartitionPruner.prune(tab,
                    opToPartPruner.get(ts), conf, (String) topOps.keySet()
                        .toArray()[0], prunedPartitions);
                opToPartList.put(ts, partsList);
              }
            } catch (HiveException e) {
              // Has to use the full name to make sure it does not conflict with
              // org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there are any unknown partitions, a map-reduce job is needed
            // so the filter can prune correctly
            if (partsList.getUnknownPartns().size() == 0) {
              globalLimitCtx.enableOpt(tempGlobalLimit);
            }
          }
        }
        if (globalLimitCtx.isEnable()) {
          LOG.info("Enabling the optimization that reduces input size for"
              + " 'limit': limit = " + globalLimitCtx.getGlobalLimit());
        }
      }
    }

    // In case of a select, use a fetch task instead of a move task
    if (qb.getIsQuery()) {
      if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
        throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
      }
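      // Describe the single result file produced by the query and wrap it in a
      // fetch over its output directory so the client can read the rows back.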
      String cols = loadFileWork.get(0).getColumns();
      String colTypes = loadFileWork.get(0).getColumnTypes();

      String resFileFormat = HiveConf.getVar(conf,
          HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
      TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes,
          resFileFormat);

      fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
          resultTab, qb.getParseInfo().getOuterQueryLimit());

      fetchTask = (FetchTask) TaskFactory.get(fetch, conf);
      setFetchTask(fetchTask);