private static PrunedPartitionList getPartitionsFromServer(Table tab,
    ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws HiveException {
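  // Overall flow: handle the trivial cases first (unpartitioned table, strict-mode check,
  // no predicate), then reduce the predicate to partition columns only, push it down to the
  // metastore when possible, and fall back to client-side evaluation otherwise.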
  try {
    if (!tab.isPartitioned()) {
      // If the table is not partitioned, return everything.
      return new PrunedPartitionList(tab, getAllPartitions(tab), false);
    }
    LOG.debug("tabname = " + tab.getTableName() + " is partitioned");
if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))
&& !hasColumnExpr(prunerExpr)) {
// If the "strict" mode is on, we have to provide partition pruner for each table.
throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
.getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\""));
}
    if (prunerExpr == null) {
      // Non-strict mode, and there are no predicates at all - get everything.
      return new PrunedPartitionList(tab, getAllPartitions(tab), false);
    }
    // Replace references to non-partition columns (including virtual columns) with nulls.
    // See the removeNonPartCols javadoc for details.
    prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab));
    // Fold away the nulled-out sub-expressions so that only partition-column predicates remain.
    // See the compactExpr javadoc for details.
    ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc) compactExpr(prunerExpr.clone());
    String oldFilter = prunerExpr.getExprString();
    if (compactExpr == null) {
      // Non-strict mode, and all the predicates are on non-partition columns - get everything.
      LOG.debug("Filter " + oldFilter + " was null after compacting");
      return new PrunedPartitionList(tab, getAllPartitions(tab), true);
    }
    LOG.debug("Filter w/ compacting: " + compactExpr.getExprString()
        + "; filter w/o compacting: " + oldFilter);
    // Finally, check the filter for non-built-in UDFs. If these are present, we cannot
    // do the filtering on the server, and have to fall back to the client path.
    boolean doEvalClientSide = hasUserFunctions(compactExpr);
    // Now filter.
    List<Partition> partitions = new ArrayList<Partition>();
    boolean hasUnknownPartitions = false;
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    if (!doEvalClientSide) {
      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
      try {
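        // Push the compacted expression down to the metastore. The returned flag means the
        // metastore could not apply the expression exactly, so the resulting list may contain
        // partitions that do not actually satisfy the filter.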
        hasUnknownPartitions = Hive.get().getPartitionsByExpr(
            tab, compactExpr, conf, partitions);
      } catch (IMetaStoreClient.IncompatibleMetastoreException ime) {
        // TODO: backward compat for Hive <= 0.12. Can be removed later.
        LOG.warn("Metastore doesn't support getPartitionsByExpr", ime);
        doEvalClientSide = true;
      } finally {
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
      }
    }
    if (doEvalClientSide) {
      // Either we have user functions, or the metastore is an old version - filter names locally.
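      // Fetch all partition names and evaluate compactExpr against each one on the client;
      // the result is "unknown" when the evaluator cannot decide a partition.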
      hasUnknownPartitions = pruneBySequentialScan(tab, partitions, compactExpr, conf);
    }
    // The partitions are "unknown" if the call says so due to the expression
    // evaluator returning null for a partition, or if we sent a partial expression to
    // the metastore and so some partitions may have no data based on other filters.
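    // If compacting changed the filter string, some predicates were dropped, so the surviving
    // partitions are only a superset and the original filter must still be applied downstream.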
    boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString());
    return new PrunedPartitionList(tab, new LinkedHashSet<Partition>(partitions),
        hasUnknownPartitions || !isPruningByExactFilter);
  } catch (HiveException e) {
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);