inputs.add(new ReadEntity(tab));
}
} else {
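// Otherwise, if the query is a single table scan whose filter references
// only partition columns, try to answer it with a fetch task instead of a
// map-reduce job.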
if (topOps.size() == 1) {
TableScanOperator ts = (TableScanOperator) topOps.values().toArray()[0];
// check whether the pruning predicate references only partition columns
if (PartitionPruner.onlyContainsPartnCols(topToTable.get(ts),
opToPartPruner.get(ts))) {
PrunedPartitionList partsList = null;
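// Reuse the cached pruned partition list for this table scan if present;
// otherwise run the partition pruner now and cache the result.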
try {
partsList = opToPartList.get(ts);
if (partsList == null) {
partsList = PartitionPruner.prune(topToTable.get(ts),
opToPartPruner.get(ts), conf, (String) topOps.keySet()
.toArray()[0], prunedPartitions);
opToPartList.put(ts, partsList);
}
} catch (HiveException e) {
// Has to use full name to make sure it does not conflict with
// org.apache.commons.lang.StringUtils
LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
throw new SemanticException(e.getMessage(), e);
}
// If there is any unknown partition, a map-reduce job is needed so the
// filter can prune it correctly; otherwise the confirmed partitions can
// be fetched directly
if (partsList.getUnknownPartns().size() == 0) {
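// All partitions are confirmed at compile time: collect each partition's
// path and descriptor, and record it as a read entity.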
List<String> listP = new ArrayList<String>();
List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
Set<Partition> parts = partsList.getConfirmedPartns();
for (Partition part : parts) {
listP.add(part.getPartitionPath().toString());
try {
partP.add(Utilities.getPartitionDesc(part));
} catch (HiveException e) {
throw new SemanticException(e.getMessage(), e);
}
inputs.add(new ReadEntity(part));
}
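// Build the fetch over the confirmed partition paths; the outer query
// LIMIT bounds how many rows are fetched.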
fetch = new FetchWork(listP, partP, qb.getParseInfo()
.getOuterQueryLimit());
noMapRed = true;
}
}
}
}
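// If a fetch-only plan could be built above, install it in place of any
// map-reduce tasks.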
if (noMapRed) {
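// Tables backed by a storage handler need their input job properties
// configured before the fetch can read them.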
if (fetch.getTblDesc() != null) {
PlanUtils.configureInputJobPropertiesForStorageHandler(
fetch.getTblDesc());
} else if (fetch.getPartDesc() != null && !fetch.getPartDesc().isEmpty()) {
PartitionDesc pd0 = fetch.getPartDesc().get(0);
TableDesc td = pd0.getTableDesc();
if (td != null && td.getProperties() != null
    && td.getProperties().containsKey(
      org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE)) {
PlanUtils.configureInputJobPropertiesForStorageHandler(td);
}
}
fetchTask = (FetchTask) TaskFactory.get(fetch, conf);
setFetchTask(fetchTask);
// remove root tasks if any
rootTasks.clear();
return;
}
}
// determine whether the query qualifies for reducing the input size for
// LIMIT. The query only qualifies when there is exactly one top operator
// and there is no transform script or UDTF and no block sampling
// is used.
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVELIMITOPTENABLE)
&& ctx.getTryCount() == 0 && topOps.size() == 1
&& !globalLimitCtx.ifHasTransformOrUDTF() &&
nameToSplitSample.isEmpty()) {
// Here we recursively check:
// 1. whether there is exactly one LIMIT in the query, and
// 2. whether there is no aggregation, group by, distinct, sort by,
//    distribute by, or table sampling in any of the sub-queries.
// The query only qualifies if both conditions are satisfied.
//
// Example qualified queries:
//   CREATE TABLE ... AS SELECT col1, col2 FROM tbl LIMIT ...
//   INSERT OVERWRITE TABLE ... SELECT col1, hash(col2), split(col1)
//     FROM ... LIMIT ...
//   SELECT * FROM (SELECT col1 AS col2 FROM (SELECT * FROM ...) t1 LIMIT ...) t2;
//
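// checkQbpForGlobalLimit is expected to return the global LIMIT value when
// the query qualifies, or null when it does not.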
Integer tempGlobalLimit = checkQbpForGlobalLimit(qb);
// the query qualifies for the optimization
if (tempGlobalLimit != null && tempGlobalLimit != 0) {
TableScanOperator ts = (TableScanOperator) topOps.values().toArray()[0];
Table tab = topToTable.get(ts);
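// An unpartitioned table with no WHERE clause needs no filtering, so the
// global limit optimization can be enabled right away.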
if (!tab.isPartitioned()) {
if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
globalLimitCtx.enableOpt(tempGlobalLimit);