// NOTE(review): this chunk starts mid-expression — it is the tail of an enclosing
// if-condition (opened above this fragment) that selects aggregate display
// expressions, including COUNT(*).
rootExpr.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR)
{
// The input column (from the child plan node's output) that this aggregate
// consumes. Resolved by one of the branches below, or the planner throws.
PlanColumn aggregateColumn = null;
// Common case: the aggregate's argument is a plain column reference,
// e.g. SUM(t.a) or COUNT(t.a).
if (rootExpr.getLeft() instanceof TupleValueExpression)
{
TupleValueExpression nested =
(TupleValueExpression) rootExpr.getLeft();
// For e.g. COUNT(DISTINCT a), insert a distinct node beneath the
// aggregate so duplicates are eliminated before aggregation.
if (((AggregateExpression)rootExpr).m_distinct) {
root = addDistinctNode(root, nested);
}
// NOTE(review): if no matching output column exists, aggregateColumn
// stays null and aggregateColumn.guid() below would NPE — confirm
// findMatchingOutputColumn's contract (does it throw on no match?).
aggregateColumn =
root.findMatchingOutputColumn(nested.getTableName(),
nested.getColumnName(),
nested.getColumnAlias());
}
// count(*) hack. we're not getting AGGREGATE_COUNT_STAR
// expression types from the parsing, so we have
// to detect the null inner expression case and do the
// switcharoo ourselves.
else if (rootExpr.getExpressionType() == ExpressionType.AGGREGATE_COUNT &&
rootExpr.getLeft() == null)
{
// COUNT(*) counts rows, so any input column works; arbitrarily use
// the child's first output column.
aggregateColumn =
m_context.get(root.getOutputColumnGUIDs().get(0));
// Rewrite the aggregate type so the executor sees COUNT(*), not COUNT(col).
agg_expression_type = ExpressionType.AGGREGATE_COUNT_STAR;
}
else
{
// Aggregates over arbitrary expressions (e.g. SUM(a + b)) are not
// supported by this planner path.
throw new PlanningErrorException("Expressions in aggregates currently unsupported");
}
// Register the resolved input column and aggregate type on the aggregate node;
// these three parallel lists stay index-aligned.
aggNode.getAggregateColumnGuids().add(aggregateColumn.guid());
aggNode.getAggregateColumnNames().add(aggregateColumn.getDisplayName());
aggNode.getAggregateTypes().add(agg_expression_type);
// A bit of a hack: ProjectionNodes using PlanColumns after the
// aggregate node need the output columns here to
// contain TupleValueExpressions (effectively on a temp table).
// So we construct one based on the output of the
// aggregate expression, the column alias provided by HSQL,
// and the offset into the output table schema for the
// aggregate node that we're computing.
TupleValueExpression tve = new TupleValueExpression();
// AVG output is widened to FLOAT regardless of the input column type.
// NOTE(review): an earlier comment here claimed the type "should be DECIMAL",
// but the code sets VoltType.FLOAT — confirm which output type AVG is meant
// to produce before changing either.
if (agg_expression_type == ExpressionType.AGGREGATE_AVG) {
tve.setValueType(VoltType.FLOAT);
tve.setValueSize(VoltType.FLOAT.getLengthInBytesForFixedTypes());
}
// Otherwise it can be whatever the rootExpression is
else {
tve.setValueType(rootExpr.getValueType());
tve.setValueSize(rootExpr.getValueSize());
}
// Position of this aggregate in the aggregate node's output schema.
tve.setColumnIndex(outputColumnIndex);
tve.setColumnName("");
tve.setColumnAlias(col.alias);
// The aggregate's output behaves like a column of a temp table downstream.
tve.setTableName(AGGREGATE_TEMP_TABLE);
PlanColumn colInfo = m_context.getPlanColumn(tve, col.alias);
aggNode.appendOutputColumn(colInfo);
aggNode.getAggregateOutputColumns().add(outputColumnIndex);
}
else
{
/*
* These columns are the pass through columns that are not being
* aggregated on. These are the ones from the SELECT list. They
* MUST already exist in the child node's output. Find them and
* add them to the aggregate's output.
*/
// NOTE(review): same unchecked-null concern as above — a missing match
// would surface later as an NPE rather than a planner error here.
PlanColumn passThruColumn =
root.findMatchingOutputColumn(col.tableName,
col.columnName,
col.alias);
aggNode.appendOutputColumn(passThruColumn);
}
outputColumnIndex++;
}
// Splice the aggregate node on top of the plan built so far.
aggNode.addAndLinkChild(root);
root = aggNode;
}
// PAVLO: Push non-AVG aggregates down into the scan for multi-partition queries
// 2012-02-15: Moved to AggregatePushdownOptimization
// handle select distinct a from t - which is planned as an aggregate but
// doesn't trigger the above aggregate conditions as it is neither grouped
// nor does it have aggregate expressions
if (aggNode == null && m_parsedSelect.distinct) {
// We currently can't handle DISTINCT of multiple columns.
// Throw a planner error if this is attempted.
if (m_parsedSelect.displayColumns.size() > 1)
{
throw new PlanningErrorException("Multiple DISTINCT columns currently unsupported");
}
// Exactly one display column at this point (checked above).
for (ParsedSelectStmt.ParsedColInfo col : m_parsedSelect.displayColumns) {
if (col.expression instanceof TupleValueExpression)
{
TupleValueExpression colexpr = (TupleValueExpression)(col.expression);
root = addDistinctNode(root, colexpr);
// aggregate handlers are expected to produce the required projection.
// the other aggregates do this inherently but distinct may need a
// projection node.