Examples of TableRef
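A TableRef pairs a resolved PTable with the alias, timestamp, and dynamic-column flag under which it was resolved. The snippets below, drawn from Phoenix's compiler and mutation classes, show how a TableRef is created and consumed.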


Examples of org.apache.phoenix.schema.TableRef
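Creating a TableRef during table resolution: any dynamic column definitions are first folded into the table, and the constructor records whether dynamic columns were present.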

            // If dynamic columns have been specified, add them to the table declaration
            if (!dynamicColumnDefs.isEmpty()) {
                theTable = this.addDynamicColumns(dynamicColumnDefs, theTable);
            }
            TableRef tableRef = new TableRef(alias, theTable, timeStamp, !dynamicColumnDefs.isEmpty());
            return tableRef;
        }
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
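Resolving a named table node: its alias, schema name, and dynamic columns feed createTableRef, and the resulting TableRef is registered in the table map under its alias.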

            String schemaName = namedTableNode.getName().getSchemaName();

            String alias = namedTableNode.getAlias();
            List<ColumnDef> dynamicColumnDefs = namedTableNode.getDynamicColumns();

            TableRef tableRef = createTableRef(alias, schemaName, tableName, dynamicColumnDefs);
            PTable theTable = tableRef.getTable();

            if (alias != null) {
                tableMap.put(alias, tableRef);
            }
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
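Resolving a column family across tables: with no table qualifier, every TableRef in scope is probed, and a second match is treated as an error.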

        private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) throws SQLException {
            if (tableName == null) {
                ColumnFamilyRef theColumnFamilyRef = null;
                Iterator<TableRef> iterator = tables.iterator();
                while (iterator.hasNext()) {
                    TableRef tableRef = iterator.next();
                    try {
                        PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
                        if (theColumnFamilyRef != null) { throw new TableNotFoundException(cfName); }
                        theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Not in this table's families; keep scanning the remaining tables.
                    }
                }
                if (theColumnFamilyRef != null) { return theColumnFamilyRef; }
                throw new TableNotFoundException(cfName);
            } else {
                TableRef tableRef = resolveTable(null, tableName);
                PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
                return new ColumnFamilyRef(tableRef, columnFamily);
            }
        }
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
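The column counterpart: an unqualified column name is searched across all TableRefs, raising AmbiguousColumnException on a duplicate match; a qualified name that resolves to no table is retried with the table name as a column family.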

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            if (tableName == null) {
                int theColumnPosition = -1;
                TableRef theTableRef = null;
                Iterator<TableRef> iterator = tables.iterator();
                while (iterator.hasNext()) {
                    TableRef tableRef = iterator.next();
                    try {
                        PColumn column = tableRef.getTable().getColumn(colName);
                        if (theTableRef != null) { throw new AmbiguousColumnException(colName); }
                        theTableRef = tableRef;
                        theColumnPosition = column.getPosition();
                    } catch (ColumnNotFoundException e) {
                        // Not in this table; keep scanning the remaining tables.
                    }
                }
                if (theTableRef != null) { return new ColumnRef(theTableRef, theColumnPosition); }
                throw new ColumnNotFoundException(colName);
            } else {
                try {
                    TableRef tableRef = resolveTable(schemaName, tableName);
                    PColumn column = tableRef.getTable().getColumn(colName);
                    return new ColumnRef(tableRef, column.getPosition());
                } catch (TableNotFoundException e) {
                    // Try using the tableName as a columnFamily reference instead
                    ColumnFamilyRef cfRef = resolveColumnFamily(schemaName, tableName);
                    PColumn column = cfRef.getFamily().getColumn(colName);
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
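Pending mutations keyed by TableRef: before commit, each entry's table metadata is revalidated against the server, using the TableRef's timestamp and PTable.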

        int i = 0;
        Long scn = connection.getSCN();
        MetaDataClient client = new MetaDataClient(connection);
        long[] timeStamps = new long[this.mutations.size()];
        for (Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry : mutations.entrySet()) {
            TableRef tableRef = entry.getKey();
            long serverTimeStamp = tableRef.getTimeStamp();
            PTable table = tableRef.getTable();
            if (!connection.getAutoCommit()) {
                serverTimeStamp = client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
                if (serverTimeStamp < 0) {
                    serverTimeStamp *= -1;
                    // TODO: use bitset?
                    PColumn[] columns = new PColumn[table.getColumns().size()];
                    for (Map.Entry<ImmutableBytesPtr,Map<PColumn,byte[]>> rowEntry : entry.getValue().entrySet()) {
                        Map<PColumn,byte[]> valueEntry = rowEntry.getValue();
                        if (valueEntry != null) {
                            for (PColumn column : valueEntry.keySet()) {
                                columns[column.getPosition()] = column;
                            }
                        }
                    }
                    table = connection.getPMetaData().getTable(tableRef.getTable().getName().getString());
                    for (PColumn column : columns) {
                        if (column != null) {
                            table.getColumnFamily(column.getFamilyName().getString()).getColumn(column.getName().getString());
                        }
                    }
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
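Walking the same TableRef-keyed mutation map to build per-table row mutations, first checking each table for index maintainers.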

        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
        List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size());
        while (iterator.hasNext()) {
            Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry = iterator.next();
            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
            TableRef tableRef = entry.getKey();
            PTable table = tableRef.getTable();
            table.getIndexMaintainers(tempPtr);
            boolean hasIndexMaintainers = tempPtr.getLength() > 0;
            boolean isDataTable = true;
            long serverTimestamp = serverTimeStamps[i++];
            Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false);
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
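Compiling DELETE: the statement's target is resolved to a TableRef, views are rejected as read-only, and the delete is rewritten as a SELECT over the PK columns so it can run as a single-row mutation, a server-side aggregate delete, or a client-side scan.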

    public MutationPlan compile(DeleteStatement delete) throws SQLException {
        final PhoenixConnection connection = statement.getConnection();
        final boolean isAutoCommit = connection.getAutoCommit();
        final ConnectionQueryServices services = connection.getQueryServices();
        final ColumnResolver resolver = FromCompiler.getResolver(delete, connection);
        final TableRef tableRef = resolver.getTables().get(0);
        if (tableRef.getTable().getType() == PTableType.VIEW) {
            throw new ReadOnlyTableException("Mutations not allowed for a view (" + tableRef.getTable() + ")");
        }
       
        final boolean hasLimit = delete.getLimit() != null;
        boolean runOnServer = isAutoCommit && !hasLimit && !hasImmutableIndex(tableRef);
        HintNode hint = delete.getHint();
        if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }

        PTable table = tableRef.getTable();
        List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
        for (int i = table.getBucketNum() == null ? 0 : 1; i < table.getPKColumns().size(); i++) {
            PColumn column = table.getPKColumns().get(i);
            String name = column.getName().getString();
            aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, name, name)));
        }
        SelectStatement select = FACTORY.select(
                Collections.singletonList(delete.getTable()),
                hint, false, aliasedNodes, delete.getWhere(),
                Collections.<ParseNode>emptyList(), null,
                delete.getOrderBy(), delete.getLimit(),
                delete.getBindCount(), false);
        DeletingParallelIteratorFactory parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection, tableRef);
        final QueryPlan plan = new QueryOptimizer(services).optimize(select, statement, Collections.<PColumn>emptyList(), parallelIteratorFactory);
        runOnServer &= plan.getTableRef().equals(tableRef);
       
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        if (hasImmutableIndexWithKeyValueColumns(tableRef)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_DELETE_IF_IMMUTABLE_INDEX).setSchemaName(tableRef.getTable().getSchemaName().getString())
            .setTableName(tableRef.getTable().getTableName().getString()).build().buildException();
        }
       
        final StatementContext context = plan.getContext();
        // If we're doing a query for a single row with no where clause, then we don't need to contact the server at all.
        // A simple check for the non-existence of a WHERE clause in the parse node is not sufficient, as the WHERE clause
        // may have been optimized out.
        if (runOnServer && context.isSingleRowScan()) {
            final ImmutableBytesPtr key = new ImmutableBytesPtr(context.getScan().getStartRow());
            return new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() {
                    Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
                    mutation.put(key, null);
                    return new MutationState(tableRef, mutation, 0, maxSize, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }
            };
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            scan.setAttribute(UngroupedAggregateRegionObserver.DELETE_AGG, QueryConstants.TRUE);

            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignore ORDER BY, since with auto-commit on and no LIMIT it makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            final RowProjector projector = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            return new MutationPlan() {

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    tableRef.getTable().getIndexMaintainers(ptr);
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
                            IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                            cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
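Compiling UPSERT: the target TableRef supplies the PTable used to map the upserted columns onto column positions and PK slots, with dynamic columns and salting handled up front.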

    public MutationPlan compile(UpsertStatement upsert) throws SQLException {
        final PhoenixConnection connection = statement.getConnection();
        ConnectionQueryServices services = connection.getQueryServices();
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final ColumnResolver resolver = FromCompiler.getResolver(upsert, connection);
        final TableRef tableRef = resolver.getTables().get(0);
        PTable table = tableRef.getTable();
        if (table.getType() == PTableType.VIEW) {
            throw new ReadOnlyTableException("Mutations not allowed for a view (" + tableRef + ")");
        }
        // Setup array of column indexes parallel to values that are going to be set
        List<ColumnName> columnNodes = upsert.getColumns();
        List<PColumn> allColumns = table.getColumns();

        int[] columnIndexesToBe;
        int[] pkSlotIndexesToBe;
        List<PColumn> targetColumns;
        boolean isSalted = table.getBucketNum() != null;
        int posOffset = isSalted ? 1 : 0;
        // Allow a full-row upsert if no columns, or only dynamic ones, are specified and the value count matches
        int numColsInUpsert = columnNodes.size();
        int numColsInTable = table.getColumns().size();
        if (columnNodes.isEmpty() || numColsInUpsert == upsert.getTable().getDynamicColumns().size()) {
            columnIndexesToBe = new int[allColumns.size() - posOffset];
            pkSlotIndexesToBe = new int[columnIndexesToBe.length];
            targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
            targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
            for (int i = posOffset, j = posOffset; i < allColumns.size(); i++) {
                PColumn column = allColumns.get(i);
                columnIndexesToBe[i-posOffset] = i;
                targetColumns.set(i-posOffset, column);
                if (SchemaUtil.isPKColumn(column)) {
                    pkSlotIndexesToBe[i-posOffset] = j++;
                }
            }
        } else {
            columnIndexesToBe = new int[numColsInUpsert];
            pkSlotIndexesToBe = new int[columnIndexesToBe.length];
            targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
            targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
            Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
            Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
            BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
            for (int i = 0; i < numColsInUpsert; i++) {
                ColumnName colName = columnNodes.get(i);
                ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
                columnIndexesToBe[i] = ref.getColumnPosition();
                targetColumns.set(i, ref.getColumn());
                if (SchemaUtil.isPKColumn(ref.getColumn())) {
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
                }
            }
            int i = posOffset;
            for ( ; i < table.getPKColumns().size(); i++) {
                PColumn pkCol = table.getPKColumns().get(i);
                if (!pkColumnsSet.get(i)) {
                    if (!pkCol.isNullable()) {
                        throw new ConstraintViolationException(table.getName().getString() + "." + pkCol.getName().getString() + " may not be null");
                    }
                }
            }
        }
       
        List<ParseNode> valueNodes = upsert.getValues();
        QueryPlan plan = null;
        RowProjector rowProjectorToBe = null;
        int nValuesToSet;
        boolean runOnServer = false;
        UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
        final boolean isAutoCommit = connection.getAutoCommit();
        if (valueNodes == null) {
            SelectStatement select = upsert.getSelect();
            assert(select != null);
            TableRef selectTableRef = FromCompiler.getResolver(select, connection).getTables().get(0);
            boolean sameTable = tableRef.equals(selectTableRef);
            /* We can run the upsert in a coprocessor if:
             * 1) the into table matches from table
             * 2) the select query isn't doing aggregation
             * 3) autoCommit is on
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
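A scan plan consulting its TableRef's table to choose between a parallel iterator and a serial scan, depending on salting, LIMIT, and ordering.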

    @Override
    protected Scanner newScanner(ConnectionQueryServices services) throws SQLException {
        // Set any scan attributes before creating the scanner, as it will be too late afterwards
        context.getScan().setAttribute(ScanRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
        ResultIterator scanner;
        TableRef tableRef = this.getTableRef();
        PTable table = tableRef.getTable();
        boolean isSalted = table.getBucketNum() != null;
        /* If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if
         * limit is provided, run query serially.
         */
        boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
View Full Code Here

Examples of org.apache.phoenix.schema.TableRef
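Compiling SELECT: the resolver's first TableRef is checked immediately so that a query against an inactive index short-circuits into a degenerate plan.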

    public QueryPlan compile(SelectStatement select) throws SQLException {
        PhoenixConnection connection = statement.getConnection();
        List<Object> binds = statement.getParameters();
        ColumnResolver resolver = FromCompiler.getResolver(select, connection);
        select = StatementNormalizer.normalize(select, resolver);
        TableRef tableRef = resolver.getTables().get(0);
        StatementContext context = new StatementContext(select, connection, resolver, binds, scan);
        // Short circuit out if we're compiling an index query and the index isn't active.
        // We must do this after the ColumnResolver resolves the table, as we may be updating the local
        // cache of the index table and it may now be inactive.
        if (tableRef.getTable().getType() == PTableType.INDEX && tableRef.getTable().getIndexState() != PIndexState.ACTIVE) {
            return new DegenerateQueryPlan(context, select, tableRef);
        }
        Integer limit = LimitCompiler.compile(context, select);

        GroupBy groupBy = GroupByCompiler.compile(context, select);
View Full Code Here
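
The snippets above share one pattern: build a ColumnResolver via FromCompiler, take a TableRef from it, and work with the underlying PTable. The minimal end-to-end sketch below ties the pieces together. It is illustrative only: the JDBC URL and the MY_TABLE/MY_COL names are placeholders, and the SQLParser.parseQuery entry point is an assumption not shown in the snippets above.

    import java.sql.DriverManager;

    import org.apache.phoenix.compile.ColumnResolver;
    import org.apache.phoenix.compile.FromCompiler;
    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.parse.SQLParser;
    import org.apache.phoenix.parse.SelectStatement;
    import org.apache.phoenix.schema.ColumnRef;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.TableRef;

    public class TableRefUsageSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL: assumes a reachable Phoenix/HBase instance.
            try (PhoenixConnection conn = DriverManager
                    .getConnection("jdbc:phoenix:localhost")
                    .unwrap(PhoenixConnection.class)) {
                // Parse a statement and resolve its FROM clause, as the SELECT compiler above does.
                SelectStatement select = new SQLParser("SELECT * FROM MY_TABLE").parseQuery();
                ColumnResolver resolver = FromCompiler.getResolver(select, conn);

                // The first TableRef carries the resolved PTable and the resolution timestamp.
                TableRef tableRef = resolver.getTables().get(0);
                PTable table = tableRef.getTable();
                System.out.println(table.getName().getString() + " @ " + tableRef.getTimeStamp());

                // Unqualified resolution searches every TableRef in scope, as in resolveColumn above.
                ColumnRef colRef = resolver.resolveColumn(null, null, "MY_COL");
                System.out.println(colRef.getColumn().getName().getString());
            }
        }
    }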