Package com.salesforce.phoenix.jdbc

Examples of com.salesforce.phoenix.jdbc.PhoenixConnection
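PhoenixConnection is Phoenix's implementation of java.sql.Connection. The excerpts below show how Phoenix's own compiler and query-services code uses it; in client code, a PhoenixConnection is normally obtained through the standard JDBC DriverManager rather than constructed directly. A minimal sketch, assuming the driver jar registers itself with DriverManager (otherwise call Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver") first), that unwrap behaves per the usual java.sql.Wrapper contract, and using "localhost" as a placeholder ZooKeeper quorum:

    import java.sql.Connection;
    import java.sql.DriverManager;

    import com.salesforce.phoenix.jdbc.PhoenixConnection;

    public class PhoenixConnectionExample {
        public static void main(String[] args) throws Exception {
            // "localhost" is a placeholder ZooKeeper quorum for the HBase cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Unwrap to the Phoenix-specific connection when Phoenix APIs are needed.
                PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
                System.out.println("autoCommit=" + pconn.getAutoCommit());
            }
        }
    }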


    public CreateIndexCompiler(PhoenixStatement statement) {
        this.statement = statement;
    }

    public MutationPlan compile(final CreateIndexStatement create) throws SQLException {
        final PhoenixConnection connection = statement.getConnection();
        final ColumnResolver resolver = FromCompiler.getResolver(create, connection);
        Scan scan = new Scan();
        final StatementContext context = new StatementContext(statement, resolver, statement.getParameters(), scan);
        ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
        List<ParseNode> splitNodes = create.getSplitNodes();
        // ... (full source truncated)
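Client code reaches CreateIndexCompiler through plain DDL. A hedged sketch, assuming a connection from the example above and java.sql.Statement imported; the index, table, and column names are placeholders (a SPLIT ON clause, parsed via getSplitNodes() above, is optional):

    try (Statement stmt = conn.createStatement()) {
        // Secondary index on NAME, with ID carried in the index as a covered column.
        stmt.executeUpdate("CREATE INDEX IF NOT EXISTS MY_IDX ON MY_TABLE (NAME) INCLUDE (ID)");
    }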


    public DeleteCompiler(PhoenixStatement statement) {
        this.statement = statement;
    }
   
    private static MutationState deleteRows(PhoenixStatement statement, TableRef tableRef, ResultIterator iterator, RowProjector projector) throws SQLException {
        PhoenixConnection connection = statement.getConnection();
        final boolean isAutoCommit = connection.getAutoCommit();
        ConnectionQueryServices services = connection.getQueryServices();
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutations = Maps.newHashMapWithExpectedSize(batchSize);
        try {
            PTable table = tableRef.getTable();
            List<PColumn> pkColumns = table.getPKColumns();
            int offset = table.getBucketNum() == null ? 0 : 1; // Take into account salting
            byte[][] values = new byte[pkColumns.size()][];
            ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
            int rowCount = 0;
            while (rs.next()) {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i+1-offset);
                    // The ResultSet.getBytes() call will have inverted it - we need to invert it back.
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getColumnModifier() == ColumnModifier.SORT_DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = ColumnModifier.SORT_DESC.apply(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                table.newKey(ptr, values);
                mutations.put(ptr, PRow.DELETE_MARKER);
                if (mutations.size() > maxSize) {
                    throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
                }
                rowCount++;
                // Commit a batch if auto commit is true and we're at our batch size
                if (isAutoCommit && rowCount % batchSize == 0) {
                    MutationState state = new MutationState(tableRef, mutations, 0, maxSize, connection);
                    connection.getMutationState().join(state);
                    connection.commit();
                    mutations.clear();
                }
            }

            // If auto commit is true, this last batch will be committed upon return
            // ... (full source truncated)
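The batch logic above has a straightforward client-side analogue: with auto-commit off, mutate in chunks and commit every batchSize rows so the accumulated MutationState stays under the configured maximum. A minimal sketch, assuming java.sql imports; the table name, key column, and id list are placeholders:

    private static void deleteInBatches(Connection conn, List<Long> idsToDelete) throws SQLException {
        conn.setAutoCommit(false);
        final int batchSize = 1000; // stands in for connection.getMutateBatchSize() capped by the max mutation size
        int rowCount = 0;
        try (PreparedStatement stmt = conn.prepareStatement("DELETE FROM MY_TABLE WHERE ID = ?")) {
            for (long id : idsToDelete) {
                stmt.setLong(1, id);
                stmt.executeUpdate();
                // Commit a batch once we hit the batch size, as deleteRows() does above.
                if (++rowCount % batchSize == 0) {
                    conn.commit();
                }
            }
        }
        conn.commit(); // flush the final partial batch
    }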
   
    public MutationPlan compile(DeleteStatement delete) throws SQLException {
        final PhoenixConnection connection = statement.getConnection();
        final boolean isAutoCommit = connection.getAutoCommit();
        final ConnectionQueryServices services = connection.getQueryServices();
        final ColumnResolver resolver = FromCompiler.getResolver(delete, connection);
        final TableRef tableRef = resolver.getTables().get(0);
        PTable table = tableRef.getTable();
        if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
            throw new ReadOnlyTableException(table.getSchemaName().getString(),table.getTableName().getString());
        }
       
        final boolean hasLimit = delete.getLimit() != null;
        boolean noQueryReqd = !hasLimit && !hasImmutableIndex(tableRef);
        boolean runOnServer = isAutoCommit && noQueryReqd;
        HintNode hint = delete.getHint();
        if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }

        List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
        boolean isSalted = table.getBucketNum() != null;
        boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
        for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0); i < table.getPKColumns().size(); i++) {
            PColumn column = table.getPKColumns().get(i);
            aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
        }
        SelectStatement select = FACTORY.select(
                // ... (remaining arguments and method body truncated)
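The isMultiTenant/getTenantId checks above come into play when the connection carries a tenant id. A sketch; the constant PhoenixRuntime.TENANT_ID_ATTRIB (JDBC property name "TenantId") is an assumption for this Phoenix version, and "acme" is a placeholder tenant:

    Properties props = new Properties();
    // Scopes the connection to one tenant's rows of multi-tenant tables.
    // PhoenixRuntime.TENANT_ID_ATTRIB ("TenantId") is assumed to exist in this version.
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "acme");
    Connection tenantConn = DriverManager.getConnection("jdbc:phoenix:localhost", props);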

    public QueryPlan compile(SelectStatement select) throws SQLException {
        return compile(select, scan, false);
    }
   
    protected QueryPlan compile(SelectStatement select, Scan scan, boolean asSubquery) throws SQLException {
        PhoenixConnection connection = statement.getConnection();
        List<Object> binds = statement.getParameters();
        ColumnResolver resolver = FromCompiler.getMultiTableResolver(select, connection);
        // TODO: do this normalization outside of this so as it's not repeated by the optimizer
        select = StatementNormalizer.normalize(select, resolver);
        StatementContext context = new StatementContext(statement, resolver, binds, scan);
        // ... (full source truncated)
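From the client's perspective this compile path is driven by ordinary JDBC query calls; a minimal sketch (table and column names are placeholders):

    try (Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT ID, NAME FROM MY_TABLE WHERE NAME LIKE 'A%'")) {
        while (rs.next()) {
            System.out.println(rs.getLong(1) + " " + rs.getString(2));
        }
    }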

        // Queries of the form "A right join B left join C" are not supported with hash joins.
        throw new SQLFeatureNotSupportedException("Joins with pattern 'A right join B left join C' not supported.");
    }
   
    protected BasicQueryPlan compileSingleQuery(StatementContext context, SelectStatement select, List<Object> binds) throws SQLException {
        PhoenixConnection connection = statement.getConnection();
        ColumnResolver resolver = context.getResolver();
        TableRef tableRef = context.getCurrentTable();
        // Short circuit out if we're compiling an index query and the index isn't active.
        // We must do this after the ColumnResolver resolves the table, as we may be updating the local
        // cache of the index table and it may now be inactive.
        // ... (full source truncated)

    abstract protected MutationState mutate(PhoenixConnection connection, ResultIterator iterator) throws SQLException;
   
    @Override
    public PeekingResultIterator newIterator(ResultIterator iterator) throws SQLException {
        // Clone the connection as it's not thread safe and will be operated on in parallel
        final PhoenixConnection connection = new PhoenixConnection(this.connection);
        MutationState state = mutate(connection, iterator);
        long totalRowCount = state.getUpdateCount();
        if (connection.getAutoCommit()) {
            connection.getMutationState().join(state);
            connection.commit();
            ConnectionQueryServices services = connection.getQueryServices();
            int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
            state = new MutationState(maxSize, connection, totalRowCount);
        }
        final MutationState finalState = state;
        byte[] value = PDataType.LONG.toBytes(totalRowCount);
        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        final Tuple tuple = new SingleKeyValueTuple(keyValue);
        return new PeekingResultIterator() {
            private boolean done = false;
           
            @Override
            public Tuple next() throws SQLException {
                if (done) {
                    return null;
                }
                done = true;
                return tuple;
            }

            @Override
            public void explain(List<String> planSteps) {
            }

            @Override
            public void close() throws SQLException {
                try {
                    // Join the child mutation states in close, since this is called in a single threaded manner
                    // after the parallel results have been processed.
                    if (!connection.getAutoCommit()) {
                        MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
                    }
                } finally {
                    connection.close();
                }
            }

            @Override
            public Tuple peek() throws SQLException {
                // Return what next() would return, without consuming it.
                return done ? null : tuple;
            }
        };
    }
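Because PhoenixConnection is not thread-safe, newIterator() clones it per worker with the copy constructor and joins the resulting MutationState back into the parent. A sketch of the same pattern, assuming only the copy constructor and MutationState methods shown above:

    // One cloned connection per worker; the clone accumulates its own MutationState.
    PhoenixConnection worker = new PhoenixConnection(parentConnection);
    try {
        // ... perform mutations through `worker` ...
    } finally {
        // Fold the worker's pending mutations back into the parent connection.
        parentConnection.getMutationState().join(worker.getMutationState());
        worker.close();
    }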

    @Override
    public PhoenixConnection connect(String url, Properties info) throws SQLException {
        Long scn = JDBCUtil.getCurrentSCN(url, info);
        PMetaData metaData = scn == null ? latestMetaData : PMetaDataImpl.pruneNewerTables(scn, latestMetaData);
        return new PhoenixConnection(this, url, info, metaData);
    }
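Because connect() prunes the cached metadata to the requested SCN, a client can read "as of" an earlier timestamp by setting CurrentSCN, the same property the upgrade code sets below. A sketch; pastTimestamp is a placeholder HBase timestamp:

    Properties props = new Properties();
    // Pin the connection to a fixed timestamp: reads see only cells at or below
    // this SCN, and metadata is pruned to tables that existed at that time.
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(pastTimestamp));
    Connection asOf = DriverManager.getConnection("jdbc:phoenix:localhost", props);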

    // Kept around for use by future upgrade steps
    protected PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection, long timestamp, String columns) throws SQLException {
        Properties props = new Properties(oldMetaConnection.getClientInfo());
        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
        // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
        PhoenixConnection metaConnection = new PhoenixConnection(this, oldMetaConnection.getURL(), props, oldMetaConnection.getPMetaData());
        SQLException sqlE = null;
        try {
            metaConnection.createStatement().executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.TYPE_SCHEMA_AND_TABLE + " ADD IF NOT EXISTS " + columns );
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                oldMetaConnection.close();
            } catch (SQLException e) {
                if (sqlE != null) {
                    sqlE.setNextException(e);
                } else {
                    sqlE = e;
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
        return metaConnection;
    }

   
    @Override
    public void init(String url, Properties props) throws SQLException {
        props = new Properties(props);
        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
        PhoenixConnection metaConnection = new PhoenixConnection(this, url, props, PMetaDataImpl.EMPTY_META_DATA);
        SQLException sqlE = null;
        try {
            try {
                metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
            } catch (NewerTableAlreadyExistsException ignore) {
                // Ignore, as this will happen if the SYSTEM.TABLE already exists at this fixed timestamp.
                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
            }
            try {
                metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_SEQUENCE_METADATA);
            } catch (NewerTableAlreadyExistsException ignore) {
                // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
            }
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                metaConnection.close();
            } catch (SQLException e) {
                if (sqlE != null) {
                    sqlE.setNextException(e);
                } else {
                    sqlE = e;
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    }
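The same executeUpdate path handles ordinary client DDL as well; for example, assuming a standard connection (the table definition is a placeholder):

    try (Statement stmt = conn.createStatement()) {
        stmt.executeUpdate("CREATE TABLE IF NOT EXISTS MY_TABLE (" +
                "ID BIGINT NOT NULL PRIMARY KEY, NAME VARCHAR)");
    }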

   
    @Override
    public PhoenixConnection connect(String url, Properties info) throws SQLException {
        return new PhoenixConnection(this, url, info, metaData);
    }
