Iterator<Map.Entry<String, Table>> iter = qb.getMetaData().getAliasToTable().entrySet().iterator();
Table tab = ((Map.Entry<String, Table>)iter.next()).getValue();
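// Only a single table is read.  If it is not partitioned and there is no
// WHERE clause, the result can be produced by a plain fetch (fetchWork)
// with the outer query limit applied.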
if (!tab.isPartitioned()) {
  if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
    fetch = new fetchWork(tab.getPath().toString(), Utilities.getTableDesc(tab),
        qb.getParseInfo().getOuterQueryLimit());
  }
  inputs.add(new ReadEntity(tab));
} else {
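  // For a partitioned table, a plain fetch is only possible when there is a
  // single partition pruner and its predicate references partition columns
  // only, so pruning alone decides which partitions need to be read.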
  if (aliasToPruner.size() == 1) {
    Iterator<Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>> iterP =
        aliasToPruner.entrySet().iterator();
    org.apache.hadoop.hive.ql.parse.ASTPartitionPruner pr =
        ((Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>) iterP.next()).getValue();
    if (pr.onlyContainsPartitionCols()) {
      List<String> listP = new ArrayList<String>();
      List<partitionDesc> partP = new ArrayList<partitionDesc>();
      PrunedPartitionList partsList = null;
      Set<Partition> parts = null;
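      // prune() splits the table's partitions into those confirmed to match
      // the predicate and those it could not decide (unknown partitions).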
      try {
        partsList = pr.prune();
        // If there is any unknown partition, create a map-reduce job for the
        // filter to prune correctly
        if (partsList.getUnknownPartns().size() == 0) {
          parts = partsList.getConfirmedPartns();
          Iterator<Partition> iterParts = parts.iterator();
          while (iterParts.hasNext()) {
            Partition part = iterParts.next();
            listP.add(part.getPartitionPath().toString());
            partP.add(Utilities.getPartitionDesc(part));
            inputs.add(new ReadEntity(part));
          }
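          // Every partition is known, so a fetch over the confirmed partition
          // paths is sufficient.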
          fetch = new fetchWork(listP, partP,
              qb.getParseInfo().getOuterQueryLimit());
        }
      } catch (HiveException e) {
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils