if (!tab.isPartitioned()) {
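  // Unpartitioned table: if no destination has a WHERE clause, the query
  // can be answered by a plain fetch of the table, so no map-reduce job
  // is needed.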
  if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
    fetch = new FetchWork(tab.getPath().toString(), Utilities
        .getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit());
    noMapRed = true;
    inputs.add(new ReadEntity(tab));
  }
} else {
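  // Partitioned table: only attempt the fetch conversion when the query
  // has a single table scan operator.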
  if (topOps.size() == 1) {
    TableScanOperator ts = (TableScanOperator) topOps.values().toArray()[0];
    // check if the pruner only contains partition columns
    if (PartitionPruner.onlyContainsPartnCols(topToTable.get(ts),
        opToPartPruner.get(ts))) {
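      // Reuse the cached pruned partition list for this table scan, or
      // compute and cache it now.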
      PrunedPartitionList partsList = null;
      try {
        partsList = opToPartList.get(ts);
        if (partsList == null) {
          partsList = PartitionPruner.prune(topToTable.get(ts),
              opToPartPruner.get(ts), conf, (String) topOps.keySet()
                  .toArray()[0], prunedPartitions);
          opToPartList.put(ts, partsList);
        }
      } catch (HiveException e) {
        // Has to use the full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }
      // If any partition is unknown, a map-reduce job is needed so the
      // filter can prune correctly; convert to a fetch only when all
      // partitions are confirmed.
      if (partsList.getUnknownPartns().size() == 0) {
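        // Record the path and descriptor of every confirmed partition and
        // mark each one as a read entity for the query.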
        List<String> listP = new ArrayList<String>();
        List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
        Set<Partition> parts = partsList.getConfirmedPartns();
        for (Partition part : parts) {
          listP.add(part.getPartitionPath().toString());
          try {
            partP.add(Utilities.getPartitionDesc(part));
          } catch (HiveException e) {
            throw new SemanticException(e.getMessage(), e);
          }
          inputs.add(new ReadEntity(part));
        }
        fetch = new FetchWork(listP, partP, qb.getParseInfo()
            .getOuterQueryLimit());
        noMapRed = true;