Package org.apache.phoenix.execute

Examples of org.apache.phoenix.execute.MutationState
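
The fragments on this page come from Phoenix classes such as UpsertCompiler, MetaDataClient and PhoenixConnection. Each one either constructs a MutationState to report how many rows a statement touched, or buffers the actual row mutations on the connection until they are committed. As a minimal JDBC-level sketch of that behaviour: the JDBC URL and the table T(ID, NAME) below are assumptions for illustration, while getMutationState() and getUpdateCount() appear in the fragments further down.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.jdbc.PhoenixConnection;

public class MutationStateIntro {
    public static void main(String[] args) throws Exception {
        // Assumed JDBC URL and table; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(false);
            try (Statement stmt = conn.createStatement()) {
                stmt.executeUpdate("UPSERT INTO T (ID, NAME) VALUES (1, 'a')");
            }
            // With auto-commit off, the row above is buffered client-side in the
            // connection-level MutationState created in the PhoenixConnection
            // constructor fragment shown below on this page.
            MutationState pending = conn.unwrap(PhoenixConnection.class).getMutationState();
            System.out.println("buffered update count: " + pending.getUpdateCount());
            conn.commit(); // flushes the buffered mutations to HBase
        }
    }
}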


                                }
                                ResultIterator iterator = aggPlan.iterator();
                                try {
                                    Tuple row = iterator.next();
                                    final long mutationCount = (Long)aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                                    return new MutationState(maxSize, connection) {
                                        @Override
                                        public long getUpdateCount() {
                                            return mutationCount;
                                        }
                                    };
                                } finally {
                                    iterator.close();
                                }
                            } finally {
                                if (cache != null) {
                                    cache.close();
                                }
                            }
                        }
   
                        @Override
                        public ExplainPlan getExplainPlan() throws SQLException {
                            List<String> queryPlanSteps =  aggPlan.getExplainPlan().getPlanSteps();
                            List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
                            planSteps.add("UPSERT ROWS");
                            planSteps.addAll(queryPlanSteps);
                            return new ExplainPlan(planSteps);
                        }
                    };
                }
            }

            ////////////////////////////////////////////////////////////////////
            // UPSERT SELECT run client-side
            /////////////////////////////////////////////////////////////////////
            return new MutationPlan() {

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }
               
                @Override
                public ParameterMetaData getParameterMetaData() {
                    return queryPlan.getContext().getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return queryPlan.getContext();
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = queryPlan.iterator();
                    if (parallelIteratorFactory == null) {
                        return upsertSelect(statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
                    }
                    parallelIteratorFactory.setRowProjector(projector);
                    parallelIteratorFactory.setColumnIndexes(columnIndexes);
                    parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
                    Tuple tuple;
                    long totalRowCount = 0;
                    while ((tuple=iterator.next()) != null) {// Runs query
                        Cell kv = tuple.getValue(0);
                        totalRowCount += PDataType.LONG.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                    }
                    // Return the total number of rows that have been updated. When auto-commit is off,
                    // the mutations remain buffered in the MutationState of the current connection.
                    return new MutationState(maxSize, statement.getConnection(), totalRowCount);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps =  queryPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
                    planSteps.add("UPSERT SELECT");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }
               
            };
        }

           
        ////////////////////////////////////////////////////////////////////
        // UPSERT VALUES
        /////////////////////////////////////////////////////////////////////
        int nodeIndex = 0;
        // initialize values with constant byte values first
        final byte[][] values = new byte[nValuesToSet][];
        if (isTenantSpecific) {
            values[nodeIndex++] = connection.getTenantId().getBytes();
        }
        if (isSharedViewIndex) {
            values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
        }
        final int nodeIndexOffset = nodeIndex;
        // Allocate array based on size of all columns in table,
        // since some values may not be set (if they're nullable).
        final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
        UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
        final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
        // First build all the expressions, as with sequences we want to collect them all first
        // and initialize them in one batch
        for (ParseNode valueNode : valueNodes) {
            if (!valueNode.isStateless()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException();
            }
            PColumn column = allColumns.get(columnIndexes[nodeIndex]);
            expressionBuilder.setColumn(column);
            Expression expression = valueNode.accept(expressionBuilder);
            if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
                throw TypeMismatchException.newException(
                        expression.getDataType(), column.getDataType(), "expression: "
                                + expression.toString() + " in column " + column);
            }
            constantExpressions.add(expression);
            nodeIndex++;
        }
        return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
                return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
                return context.getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
                return context;
            }

            @Override
            public MutationState execute() throws SQLException {
                ImmutableBytesWritable ptr = context.getTempPtr();
                final SequenceManager sequenceManager = context.getSequenceManager();
                // Next evaluate all the expressions
                int nodeIndex = nodeIndexOffset;
                Tuple tuple = sequenceManager.getSequenceCount() == 0 ? null :
                    sequenceManager.newSequenceTuple(null);
                for (Expression constantExpression : constantExpressions) {
                    PColumn column = allColumns.get(columnIndexes[nodeIndex]);
                    constantExpression.evaluate(tuple, ptr);
                    Object value = null;
                    if (constantExpression.getDataType() != null) {
                        value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(), constantExpression.getMaxLength(), constantExpression.getScale());
                        if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
                            throw TypeMismatchException.newException(
                                constantExpression.getDataType(), column.getDataType(), "expression: "
                                        + constantExpression.toString() + " in column " + column);
                        }
                        if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(),
                                constantExpression.getMaxLength(), constantExpression.getScale(),
                                column.getMaxLength(), column.getScale())) {
                            throw new SQLExceptionInfo.Builder(
                                SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString())
                                .setMessage("value=" + constantExpression.toString()).build().buildException();
                        }
                    }
                    column.getDataType().coerceBytes(ptr, value,
                            constantExpression.getDataType(), constantExpression.getMaxLength(), constantExpression.getScale(), constantExpression.getSortOrder(),
                            column.getMaxLength(), column.getScale(),column.getSortOrder());
                    if (overlapViewColumns.contains(column) && Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(), column.getViewConstant(), 0, column.getViewConstant().length-1) != 0) {
                        throw new SQLExceptionInfo.Builder(
                                SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
                                .setColumnName(column.getName().getString())
                                .setMessage("value=" + constantExpression.toString()).build().buildException();
                    }
                    values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                    nodeIndex++;
                }
                // Add columns based on view
                for (PColumn column : addViewColumns) {
                    if (IndexUtil.getViewConstantValue(column, ptr)) {
                        values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                    } else {
                        throw new IllegalStateException();
                    }
                }
                Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
                setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
                return new MutationState(tableRef, mutation, 0, maxSize, connection);
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
                List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
View Full Code Here
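
The plans above all hand their result back through MutationPlan.execute(). A rough sketch of how such a plan might be consumed by its caller follows; this is not the actual PhoenixStatement code, and MutationState.join() is assumed to be available for merging the pending mutations into the connection:

import java.sql.SQLException;

import org.apache.phoenix.compile.MutationPlan;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.jdbc.PhoenixConnection;

class MutationPlanRunner {
    // Hypothetical helper: execute a compiled upsert plan and surface its row count.
    static long run(PhoenixConnection connection, MutationPlan plan) throws SQLException {
        MutationState state = plan.execute();          // runs one of the execute() bodies above
        connection.getMutationState().join(state);     // assumed: merges pending mutations into the connection
        if (connection.getAutoCommit()) {
            connection.commit();                       // flush immediately when auto-commit is on
        }
        return state.getUpdateCount();                 // surfaced as the JDBC update count
    }
}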


        }
    }

    @Override
    public MutationState updateData(MutationPlan plan) throws SQLException {
        return new MutationState(0, plan.getConnection());
    }
View Full Code Here

                        table.getTimeStamp() >= maxTimestamp ||
                         ! Objects.equal(tenantId, table.getTenantId())) );
            }
           
        });
        this.mutationState = new MutationState(maxSize, this);
        this.services.addConnection(this);

        // set up tracing, if it's enabled
        this.sampler = Tracing.getConfiguredSampler(this);
    }
View Full Code Here

    }

    public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException {
        PTable table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, null, null);
        if (table == null || table.getType() == PTableType.VIEW) {
            return new MutationState(0, connection);
        }
        // Hack to get around the case when an SCN is specified on the connection.
        // In this case, we won't see the table we just created yet, so we work
        // around it by forcing the compiler not to resolve anything.
        PostDDLCompiler compiler = new PostDDLCompiler(connection);
View Full Code Here

        ColumnResolver resolver = FromCompiler.getResolver(dataTableNode, conn);
        TableRef tableRef = resolver.getTables().get(0);
        boolean success = false;
        SQLException sqlException = null;
        try {
            MutationState state = newClientAtNextTimeStamp.buildIndex(index, tableRef);
            success = true;
            return state;
        } catch (SQLException e) {
            sqlException = e;
        } finally {
View Full Code Here

        AlterIndexStatement indexStatement = null;
        boolean wasAutoCommit = connection.getAutoCommit();
        connection.rollback();
        try {
            connection.setAutoCommit(true);
            MutationState state;
           
            // For local indexes, we optimize the initial index population by *not* sending Puts over
            // the wire for the index rows, as we don't need to do that. Instead, we tap into our
            // region observer to generate the index rows based on the data rows as we scan
            if (index.getIndexType() == IndexType.LOCAL) {
                final PhoenixStatement statement = new PhoenixStatement(connection);
                String tableName = getFullTableName(dataTableRef);
                String query = "SELECT count(*) FROM " + tableName;
                QueryPlan plan = statement.compileQuery(query);
                TableRef tableRef = plan.getContext().getResolver().getTables().get(0);
                // Set an attribute on the scan that UngroupedAggregateRegionObserver will switch on.
                // The server side detects that this attribute is set and writes the index
                // rows per region as a result. The value of the attribute is our serialized
                // index maintainers.
                // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver
                Scan scan = plan.getContext().getScan();
                try {
                    scan.setTimeRange(dataTableRef.getLowerBoundTimeStamp(), Long.MAX_VALUE);
                } catch (IOException e) {
                    throw new SQLException(e);
                }
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                PTable dataTable = tableRef.getTable();
                List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
                // Only build newly created index.
                indexes.add(index);
                IndexMaintainer.serialize(dataTable, ptr, indexes);
                scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD, ByteUtil.copyKeyBytesIfNecessary(ptr));
                // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
                // However, in this case, we need to project all of the data columns that contribute to the index.
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable);
                for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
                    scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
                }
                Cell kv = plan.iterator().next().getValue(0);
                ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                // A single Cell will be returned with the count(*) - we decode that here
                long rowCount = PDataType.LONG.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
                // The contract is to return a MutationState that contains the number of rows modified. In this
                // case, it's the number of rows in the data table which corresponds to the number of index
                // rows that were added.
                state = new MutationState(0, connection, rowCount);
            } else {
                PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef);
                MutationPlan plan = compiler.compile(index);
                try {
                    plan.getContext().setScanTimeRange(new TimeRange(dataTableRef.getLowerBoundTimeStamp(), Long.MAX_VALUE));
View Full Code Here
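
As the comments above spell out, the MutationState built for a local index only carries a row count: the number of data-table rows scanned, which equals the number of index rows written. A short sketch of how that contract surfaces at the JDBC level, with hypothetical table and index names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class LocalIndexBuildCount {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS DATA_T (ID BIGINT PRIMARY KEY, V VARCHAR)");
            stmt.executeUpdate("UPSERT INTO DATA_T VALUES (1, 'a')");
            conn.commit();
            // The update count reported for CREATE INDEX is the row count carried by the
            // MutationState built above, i.e. the number of index rows populated.
            int indexRows = stmt.executeUpdate("CREATE LOCAL INDEX IDX_V ON DATA_T (V)");
            System.out.println("index rows built: " + indexRows);
        }
    }
}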

                }
                throw e;
            }
        }
        if (table == null) {
            return new MutationState(0, connection);
        }
       
        // If our connection is at a fixed point-in-time, we need to open a new
        // connection so that our new index table is visible.
        if (connection.getSCN() != null) {
View Full Code Here

        String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString();
        try {
            connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName, timestamp);
        } catch (SequenceNotFoundException e) {
            if (statement.ifExists()) {
                return new MutationState(0, connection);
            }
            throw e;
        }
        return new MutationState(1, connection);
    }
View Full Code Here

        try {
            connection.getQueryServices().createSequence(tenantId, schemaName, sequenceName,
                startWith, incrementBy, cacheSize, minValue, maxValue, cycle, timestamp);
        } catch (SequenceAlreadyExistsException e) {
            if (ifNotExists) {
                return new MutationState(0, connection);
            }
            throw e;
        }
        return new MutationState(1, connection);
    }
View Full Code Here
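
Both sequence fragments follow the same pattern: MutationState(1, connection) when a sequence is actually created or dropped, and MutationState(0, connection) when the IF NOT EXISTS / IF EXISTS guard swallows the exception. A small sketch of the resulting JDBC update counts, with a hypothetical sequence name:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SequenceDdlCounts {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            int created = stmt.executeUpdate(
                "CREATE SEQUENCE IF NOT EXISTS MY_SEQ START WITH 1 INCREMENT BY 1"); // 1: sequence created
            int createdAgain = stmt.executeUpdate(
                "CREATE SEQUENCE IF NOT EXISTS MY_SEQ");                             // 0: already exists
            int dropped = stmt.executeUpdate("DROP SEQUENCE IF EXISTS MY_SEQ");      // 1: sequence dropped
            System.out.println(created + " " + createdAgain + " " + dropped);
        }
    }
}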

                            return connection.getQueryServices().updateData(plan);
                        }
                    }
                    break;
                }
                 return new MutationState(0, connection);
        } finally {
            connection.setAutoCommit(wasAutoCommit);
        }
    }
View Full Code Here
