
Examples of org.voltdb.plannodes.NodeSchema
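
NodeSchema describes the output columns of a plan node as an ordered list of SchemaColumn entries, each wrapping an AbstractExpression (most often a TupleValueExpression). As a quick orientation before the snippets, here is a minimal sketch of the construction pattern they all share; the table name "T1", its alias, and the column "A" are placeholder identifiers of ours, not taken from any VoltDB catalog.

        // Minimal sketch (not from the VoltDB sources): build a one-column output
        // schema around a TupleValueExpression and hand it to a projection node.
        TupleValueExpression tve = new TupleValueExpression("T1", "T1", "A", "A", 0);
        tve.setValueType(VoltType.INTEGER);
        tve.setValueSize(VoltType.INTEGER.getLengthInBytesForFixedTypes());

        NodeSchema schema = new NodeSchema();
        schema.addColumn(new SchemaColumn("T1", "T1", "A", "A", tve));

        ProjectionPlanNode projectionNode = new ProjectionPlanNode();
        projectionNode.setOutputSchema(schema);

The fragments below show the same pattern in the planner tests, the materialized view fix-up, and the DML planners.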


        assertEquals(4, pn.getOutputSchema().getColumns().size());

        pn = compile("select A,C  FROM R1 A JOIN R2 B USING(A)");
        pn = pn.getChild(0);
        assertTrue(pn instanceof ProjectionPlanNode);
        NodeSchema ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
View Full Code Here


    public void testDisplayColumnFromUsingCondition() {
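        // Every query below joins through USING(A); each display column in the resulting
        // plan must resolve to a TupleValueExpression with a valid output column index.
        // max(A): the aggregation is inlined into the nest loop join node.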
        AbstractPlanNode pn = compile("select  max(A) FROM R1 JOIN R2 USING(A)");
        pn = pn.getChild(0);
        assertNotNull(AggregatePlanNode.getInlineAggregationNode(pn));
        assertTrue(pn instanceof NestLoopPlanNode);
        NodeSchema ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        pn = compile("select  distinct(A) FROM R1 JOIN R2 USING(A)");
        pn = pn.getChild(0);
        assertTrue(pn instanceof ProjectionPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
        pn = pn.getChild(0);
        assertTrue(pn instanceof DistinctPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        pn = compile("select  A  FROM R1 JOIN R2 USING(A) ORDER BY A");
        pn = pn.getChild(0);
        assertTrue(pn instanceof ProjectionPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
        pn = pn.getChild(0);
        assertTrue(pn instanceof OrderByPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        List<AbstractPlanNode> apl;
        AbstractPlanNode node;
        SeqScanPlanNode seqScan;
        NestLoopPlanNode nlj;

        apl = compileToFragments("select * FROM P1 LABEL JOIN R2 USING(A) WHERE A > 0 and R2.C >= 5");
        pn = apl.get(1);
        node = pn.getChild(0);
        assertTrue(node instanceof NestLoopPlanNode);
        assertEquals(ExpressionType.COMPARE_EQUAL,
                     ((NestLoopPlanNode)node).getJoinPredicate().getExpressionType());
        assertTrue(node.getChild(0) instanceof SeqScanPlanNode);
        seqScan = (SeqScanPlanNode)node.getChild(0);
        assertTrue(seqScan.getPredicate() == null);
        node = node.getChild(1);
        assertTrue(node instanceof SeqScanPlanNode);
        seqScan = (SeqScanPlanNode)node;
        assertEquals(ExpressionType.CONJUNCTION_AND, seqScan.getPredicate().getExpressionType());

        apl = compileToFragments("select * FROM P1 LABEL LEFT JOIN R2 USING(A) WHERE A > 0");
        pn = apl.get(1);
        node = pn.getChild(0);
        assertTrue(node instanceof NestLoopPlanNode);
        nlj = (NestLoopPlanNode) node;
        assertTrue(JoinType.LEFT == nlj.getJoinType());
        assertEquals(ExpressionType.COMPARE_EQUAL, nlj.getJoinPredicate().getExpressionType());
        seqScan = (SeqScanPlanNode)node.getChild(0);
        assertTrue(seqScan.getPredicate() != null);
        assertEquals(ExpressionType.COMPARE_GREATERTHAN, seqScan.getPredicate().getExpressionType());

        apl = compileToFragments("select A FROM R2 LABEL RIGHT JOIN P1 AP1 USING(A) WHERE A > 0");
        pn = apl.get(0);
        ns = pn.getOutputSchema();
        assertEquals(1, ns.size());
        SchemaColumn sc = ns.getColumns().get(0);
        assertEquals("AP1", sc.getTableAlias());
        assertEquals("P1", sc.getTableName());
        pn = apl.get(1);
        node = pn.getChild(0);
        assertTrue(node instanceof NestLoopPlanNode);
        nlj = (NestLoopPlanNode) node;
        assertTrue(JoinType.LEFT == nlj.getJoinType());
        assertEquals(ExpressionType.COMPARE_EQUAL, nlj.getJoinPredicate().getExpressionType());
        seqScan = (SeqScanPlanNode)node.getChild(0);
        assertTrue(seqScan.getPredicate() != null);
        assertEquals(ExpressionType.COMPARE_GREATERTHAN, seqScan.getPredicate().getExpressionType());
        ns = seqScan.getOutputSchema();
        assertEquals(1, ns.size());
        sc = ns.getColumns().get(0);
        assertEquals("AP1", sc.getTableAlias());
        assertEquals("P1", sc.getTableName());

    }
View Full Code Here
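
The same six-line loop, asserting that every output column is a TupleValueExpression with a resolved column index, repeats after each compile() call above. It could be factored into a private helper; the sketch below does exactly that, and the method name is ours, not part of the VoltDB test class.

    // Hypothetical helper (not in the VoltDB test class): assert that every column
    // of a plan node's output schema is a TVE with a resolved output index.
    private void assertResolvedTVEs(NodeSchema ns) {
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
    }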

        m_orderColumns = new ArrayList<ParsedColInfo>();
        AbstractExpression tmpHaving = m_having;


        boolean tmpHasComplexAgg = hasComplexAgg();
        NodeSchema tmpNodeSchema = m_projectSchema;

        // Set the final output schema to null so that placeTVEsinColumns() builds a fresh one.
        m_projectSchema = null;

        m_aggregationList = new ArrayList<AbstractExpression>();
View Full Code Here

            index++;
        }

        // Build the projection schema from the display columns, replacing their
        // expressions with TVEs when a complex aggregate is involved.
        if (m_projectSchema == null) {
            m_projectSchema = new NodeSchema();
            for (ParsedColInfo col : m_displayColumns) {
                AbstractExpression expr = col.expression;
                if (hasComplexAgg()) {
                    expr = col.expression.replaceWithTVE(aggTableIndexMap, indexToColumnMap);
                }
View Full Code Here

                CatalogUtil.getSortedCatalogItems(table.getColumns(), "index");

        // Start the real materialized view processing to fix the duplicate-rows problem.
        // (1) Construct the new projection columns for the scan plan node.
        Set<SchemaColumn> mvDDLGroupbyColumns = new HashSet<SchemaColumn>();
        NodeSchema inlineProjSchema = new NodeSchema();
        for (SchemaColumn scol: scanColumns) {
            inlineProjSchema.addColumn(scol);
        }

        String mvTableAlias = getMVTableAlias();

        for (int i = 0; i < numOfGroupByColumns; i++) {
            Column mvCol = mvColumnArray.get(i);
            String colName = mvCol.getName();

            TupleValueExpression tve = new TupleValueExpression(mvTableName, mvTableAlias, colName, colName, i);
            tve.setTypeSizeBytes(mvCol.getType(), mvCol.getSize(), mvCol.getInbytes());

            mvDDLGroupbyColumnNames.add(colName);

            SchemaColumn scol = new SchemaColumn(mvTableName, mvTableAlias, colName, colName, tve);

            mvDDLGroupbyColumns.add(scol);
            if (!scanColumns.contains(scol)) {
                scanColumns.add(scol);
                // The group-by column was not among the scan columns; add it to the
                // scan node's inline projection as well.
                inlineProjSchema.addColumn(scol);
            }
        }


        // Record the re-aggregation type for each scan column.
        Map<String, ExpressionType> mvColumnReAggType = new HashMap<String, ExpressionType>();
        for (int i = numOfGroupByColumns; i < mvColumnArray.size(); i++) {
            Column mvCol = mvColumnArray.get(i);
            ExpressionType reAggType = ExpressionType.get(mvCol.getAggregatetype());

            if (reAggType == ExpressionType.AGGREGATE_COUNT_STAR ||
                    reAggType == ExpressionType.AGGREGATE_COUNT) {
                reAggType = ExpressionType.AGGREGATE_SUM;
            }
            mvColumnReAggType.put(mvCol.getName(), reAggType);
        }

        m_scanInlinedProjectionNode = new ProjectionPlanNode();
        m_scanInlinedProjectionNode.setOutputSchema(inlineProjSchema);

        // (2) Construct the reAggregation Node.

        // Construct the reAggregation plan node's aggSchema
        m_reAggNode = new HashAggregatePlanNode();
        int outputColumnIndex = 0;
        // inlineProjSchema contains the group-by columns, while aggSchema may not.
        NodeSchema aggSchema = new NodeSchema();

        // Construct reAggregation node's aggregation and group by list.
        for (SchemaColumn scol: scanColumns) {
            if (mvDDLGroupbyColumns.contains(scol)) {
                // Add group by expression.
                m_reAggNode.addGroupByExpression(scol.getExpression());
            } else {
                ExpressionType reAggType = mvColumnReAggType.get(scol.getColumnName());
                assert(reAggType != null);
                AbstractExpression agg_input_expr = scol.getExpression();
                assert(agg_input_expr instanceof TupleValueExpression);
                // Add aggregation information.
                m_reAggNode.addAggregate(reAggType, false, outputColumnIndex, agg_input_expr);
            }
            aggSchema.addColumn(scol);
            outputColumnIndex++;
        }
        m_reAggNode.setOutputSchema(aggSchema);

View Full Code Here
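
This fragment only builds the two nodes. As a sketch of how they would typically be wired in (an assumption of ours; the wiring itself is not shown above, and scanNode is a hypothetical reference to the scan of the materialized view table):

        // Sketch (assumption, not part of the fragment above): inline the projection
        // into the materialized view scan and place the re-aggregation node above it.
        scanNode.addInlinePlanNode(m_scanInlinedProjectionNode);
        m_reAggNode.addAndLinkChild(scanNode);

The re-aggregation is what collapses the duplicate rows the scan can produce; note that COUNT and COUNT(*) columns from the view's DDL are re-aggregated with SUM, as set up in the loop above.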

        DeletePlanNode deleteNode = new DeletePlanNode();
        deleteNode.setTargetTableName(targetTable.getTypeName());

        ProjectionPlanNode projectionNode = new ProjectionPlanNode();
        AbstractExpression addressExpr = new TupleAddressExpression();
        NodeSchema proj_schema = new NodeSchema();
        // This planner-generated column is special: it carries the tuple's storage
        // address so the delete executor can locate the rows to remove.
        proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                               "VOLT_TEMP_TABLE",
                                               "tuple_address",
                                               "tuple_address",
                                               addressExpr));
        projectionNode.setOutputSchema(proj_schema);
View Full Code Here

        // set this to false until proven otherwise
        updateNode.setUpdateIndexes(false);

        ProjectionPlanNode projectionNode = new ProjectionPlanNode();
        TupleAddressExpression tae = new TupleAddressExpression();
        NodeSchema proj_schema = new NodeSchema();
        // This planner-generated column is special: it carries the tuple's storage
        // address so the update executor can locate the rows to modify.
        proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                               "VOLT_TEMP_TABLE",
                                               "tuple_address",
                                               "tuple_address",
                                               tae));

        // get the set of columns affected by indexes
        Set<String> affectedColumns = getIndexedColumnSetForTable(targetTable);

        // add the output columns we need to the projection
        //
        // Right now, the EE is going to use the original column names
        // and compare these to the persistent table column names in the
        // update executor in order to figure out which table columns get
        // updated.  We'll associate the actual values with VOLT_TEMP_TABLE
        // to avoid any false schema/column matches with the actual table.
        for (Entry<Column, AbstractExpression> col : m_parsedUpdate.columns.entrySet()) {
            String tableName = col.getKey().getTypeName();
            AbstractExpression expr = col.getValue();
            expr.setInBytes(col.getKey().getInbytes());

            proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                                   "VOLT_TEMP_TABLE",
                                                   tableName,
                                                   tableName,
                                                   expr));
View Full Code Here
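
The delete and update fragments above construct the identical planner-generated "tuple_address" column. If that construction were shared, it could look like the helper below; the method name is ours and does not exist in the VoltDB planner.

    // Hypothetical helper (not in the VoltDB planner): the planner-generated column
    // that exposes each row's storage address to the delete/update executors.
    private static SchemaColumn makeTupleAddressColumn() {
        TupleAddressExpression tae = new TupleAddressExpression();
        return new SchemaColumn("VOLT_TEMP_TABLE",
                                "VOLT_TEMP_TABLE",
                                "tuple_address",
                                "tuple_address",
                                tae);
    }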

                String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
                m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
            }
        }

        NodeSchema matSchema = null;
        if (subquery == null) {
            matSchema = new NodeSchema();
        }

        int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
        int i = 0;

        // The insert statement's set of columns are contained in a LinkedHashMap,
        // meaning that we'll iterate over the columns here in the order that the user
        // specified them in the original SQL.  (If the statement didn't specify any
        // columns, then all the columns will be in the map in schema order.)
        //   - Build the field map, used by insert executor to build tuple to execute
        //   - For VALUES(...) insert statements, build the materialize node's schema
        for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
            Column col = e.getKey();
            fieldMap[i] = col.getIndex();

            if (matSchema != null) {
                AbstractExpression valExpr = e.getValue();
                valExpr.setInBytes(col.getInbytes());

                // Patch over any mismatched expressions with an explicit cast.
                // Most impossible-to-cast type combinations should have already been caught by the
                // parser, but there are also runtime checks in the casting code
                // -- such as for out of range values.
                valExpr = castExprIfNeeded(valExpr, col);

                matSchema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                        "VOLT_TEMP_TABLE",
                        col.getTypeName(),
                        col.getTypeName(),
                        valExpr));
            }
View Full Code Here

            // This column could share an existing TVE, but it is logically different,
            // so create a fresh one.
            TupleValueExpression tve = new TupleValueExpression(
                    "VOLT_TEMP_TABLE", "VOLT_TEMP_TABLE", "modified_tuples", "modified_tuples", 0);
            tve.setValueType(VoltType.BIGINT);
            tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
            NodeSchema count_schema = new NodeSchema();
            SchemaColumn col = new SchemaColumn("VOLT_TEMP_TABLE",
                    "VOLT_TEMP_TABLE",
                    "modified_tuples",
                    "modified_tuples",
                    tve);
            count_schema.addColumn(col);
            countNode.setOutputSchema(count_schema);
        }

        // connect the nodes to build the graph
        sumOrLimitNode.addAndLinkChild(dmlRoot);
View Full Code Here

        ProjectionPlanNode projectionNode =
            new ProjectionPlanNode();

        // Build the output schema for the projection based on the display columns
        NodeSchema proj_schema = m_parsedSelect.getFinalProjectionSchema();
        projectionNode.setOutputSchemaWithoutClone(proj_schema);

        // if the projection can be done inline...
        if (rootNode instanceof AbstractScanPlanNode) {
            rootNode.addInlinePlanNode(projectionNode);
View Full Code Here
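
The fragment ends inside the inline branch. For completeness, a sketch of what the non-inline branch would presumably do (an assumption based on the addAndLinkChild pattern in the earlier fragments, not on the elided code):

        // Sketch (assumption, not shown in the fragment above): when the root is not a
        // scan node, link the projection in above it as the new plan root instead.
        projectionNode.addAndLinkChild(rootNode);
        rootNode = projectionNode;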
