Package org.apache.phoenix.schema

Examples of org.apache.phoenix.schema.PTable


    /**
     * Ensures that metaData mutations are handled in the correct order
     */
    private PMetaData metaDataMutated(String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
        synchronized(latestMetaDataLock) {
            PMetaData metaData = latestMetaData;
            PTable table;
            long endTime = System.currentTimeMillis() + DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS;
            while (true) {
                try {
                    try {
                        table = metaData.getTable(tableName);
                        /* If the table is at the prior sequence number, then we're good to go.
                         * We know if we've got this far, that the server validated the mutations,
                         * so we'd just need to wait until the other connection that mutated the same
                         * table is processed.
                         */
                        if (table.getSequenceNumber() + 1 == tableSeqNum) {
                            // TODO: assert that timeStamp is bigger than table timeStamp?
                            metaData = mutator.mutate(metaData);
                            break;
                        } else if (table.getSequenceNumber() >= tableSeqNum) {
                            logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
                            break;
                        }
                    } catch (TableNotFoundException e) {
                        // Ignore: the table may not be cached yet; fall through to the wait/retry logic below.
                    }
                    long waitTime = endTime - System.currentTimeMillis();
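The excerpt applies a metadata mutation only when the cached PTable is exactly one sequence number behind the incoming change; an equal or newer cached version is skipped, and anything older is waited on until the timeout. A minimal sketch of just that ordering decision, assuming only the getSequenceNumber() accessor shown above (the class, enum, and method names below are illustrative, not Phoenix API):

    final class MetaDataOrdering {
        enum MutationOrder { APPLY, STALE, WAIT }

        // cachedSeqNum comes from table.getSequenceNumber(); incomingSeqNum is the mutation's tableSeqNum.
        static MutationOrder classify(long cachedSeqNum, long incomingSeqNum) {
            if (cachedSeqNum + 1 == incomingSeqNum) {
                return MutationOrder.APPLY;  // next expected version: apply the mutator
            }
            if (cachedSeqNum >= incomingSeqNum) {
                return MutationOrder.STALE;  // cache already at or past this version: skip it
            }
            return MutationOrder.WAIT;       // a gap exists: wait for the intervening mutation
        }
    }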


            int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
            boolean isAutoCommit = connection.getAutoCommit();
            byte[][] values = new byte[columnIndexes.length][];
            int rowCount = 0;
            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(batchSize);
            PTable table = tableRef.getTable();
            ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
            while (rs.next()) {
                for (int i = 0; i < values.length; i++) {
                    PColumn column = table.getColumns().get(columnIndexes[i]);
                    byte[] byteValue = rs.getBytes(i+1);
                    Object value = rs.getObject(i+1);
                    int rsPrecision = rs.getMetaData().getPrecision(i+1);
                    Integer precision = rsPrecision == 0 ? null : rsPrecision;
                    int rsScale = rs.getMetaData().getScale(i+1);
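The loop above pulls both the serialized bytes and the typed object for each projected column, and treats a JDBC-reported precision of 0 as "unspecified". A small, self-contained illustration of that normalization using plain java.sql metadata (the class and method names are hypothetical):

    import java.sql.ResultSet;
    import java.sql.SQLException;

    final class JdbcPrecisionUtil {
        // Mirrors the snippet: a JDBC-reported precision of 0 means "unspecified" and is stored as null.
        static Integer precisionOrNull(ResultSet rs, int oneBasedColumn) throws SQLException {
            int precision = rs.getMetaData().getPrecision(oneBasedColumn);
            return precision == 0 ? null : precision;
        }
    }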

            return dataPlan;
        }
        // Get the statement as it's been normalized now
        // TODO: the recompile for the index tables could skip the normalize step
        select = (SelectStatement)dataPlan.getStatement();
        PTable dataTable = dataPlan.getTableRef().getTable();
        List<PTable> indexes = Lists.newArrayList(dataTable.getIndexes());
        if (indexes.isEmpty() || dataPlan.getTableRef().hasDynamicCols() || select.getHint().hasHint(Hint.NO_INDEX)) {
            return dataPlan;
        }
       
        // The targetColumns is set for UPSERT SELECT to ensure that the proper type conversion takes place.

                    if (plan1.getGroupBy().isOrderPreserving() != plan2.getGroupBy().isOrderPreserving()) {
                        return plan1.getGroupBy().isOrderPreserving() ? -1 : 1;
                    }
                }
                // Use smaller table (table with fewest kv columns)
                PTable table1 = plan1.getTableRef().getTable();
                PTable table2 = plan2.getTableRef().getTable();
                c = (table1.getColumns().size() - table1.getPKColumns().size()) - (table2.getColumns().size() - table2.getPKColumns().size());
                if (c != 0) return c;
               
                // All things being equal, just use the index table
                // TODO: have hint that drives this
                if (plan1.getTableRef().getTable().getType() == PTableType.INDEX) {
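When two plans are otherwise equivalent, the comparator above prefers the table with the fewest key-value (non-PK) columns and only then falls back to favoring an index table. The tiebreaker is simply the difference of two counts visible on PTable; a hedged sketch using only the accessors shown in the excerpt (the class and method names are illustrative):

    import org.apache.phoenix.schema.PTable;

    final class PlanSizeUtil {
        // Number of columns stored as key-values, i.e. not part of the row key.
        static int kvColumnCount(PTable table) {
            return table.getColumns().size() - table.getPKColumns().size();
        }

        // Negative means table1 is "smaller" and should sort first, as in the comparator above.
        static int compareByKvColumns(PTable table1, PTable table2) {
            return kvColumnCount(table1) - kvColumnCount(table2);
        }
    }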

    public static RowProjector compile(StatementContext context, SelectStatement statement, GroupBy groupBy) throws SQLException  {
        return compile(context, statement, groupBy, Collections.<PColumn>emptyList());
    }
   
    private static void projectAllTableColumns(StatementContext context, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
        PTable table = tableRef.getTable();
        for (int i = table.getBucketNum() == null ? 0 : 1; i < table.getColumns().size(); i++) {
            ColumnRef ref = new ColumnRef(tableRef,i);
            Expression expression = ref.newColumnExpression();
            projectedExpressions.add(expression);
            projectedColumns.add(new ExpressionProjector(ref.getColumn().getName().getString(), table.getName().getString(), expression, false));
        }
    }
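projectAllTableColumns starts its loop at index 1 rather than 0 whenever getBucketNum() is non-null: a salted table carries an internal salt-byte column at position 0 of its column list, and that column should never be projected. A one-method helper capturing that convention (the class and method names are illustrative, not Phoenix API):

    import org.apache.phoenix.schema.PTable;

    final class ColumnOffsetUtil {
        // Salted tables (non-null bucket number) reserve position 0 for the salt-byte
        // column, so column iteration starts at 1 for them and at 0 otherwise.
        static int positionOffset(PTable table) {
            return table.getBucketNum() == null ? 0 : 1;
        }
    }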

            projectedColumns.add(new ExpressionProjector(ref.getColumn().getName().getString(), table.getName().getString(), expression, false));
        }
    }
   
    private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
        PTable index = tableRef.getTable();
        PTable table = context.getConnection().getPMetaData().getTable(index.getParentName().getString());
        int tableOffset = table.getBucketNum() == null ? 0 : 1;
        int indexOffset = index.getBucketNum() == null ? 0 : 1;
        if (index.getColumns().size()-indexOffset != table.getColumns().size()-tableOffset) {
            // The optimizer will end up not using this plan, so just throw
            throw new ColumnNotFoundException(WildcardParseNode.INSTANCE.toString());
        }
        for (int i = tableOffset; i < table.getColumns().size(); i++) {
            PColumn tableColumn = table.getColumns().get(i);
            PColumn indexColumn = index.getColumn(IndexUtil.getIndexColumnName(tableColumn));
            ColumnRef ref = new ColumnRef(tableRef,indexColumn.getPosition());
            Expression expression = ref.newColumnExpression();
            projectedExpressions.add(expression);
            ExpressionProjector projector = new ExpressionProjector(tableColumn.getName().getString(), table.getName().getString(), expression, false);
            projectedColumns.add(projector);
        }
    }
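Every data-table column covered by an index has a counterpart whose name is derived via IndexUtil.getIndexColumnName, which is why the method above can walk the data table's columns, look up the matching index column, and project it under the original data-table column name. A hedged sketch of that per-column mapping, reusing only the calls from the excerpt (the class and method names are illustrative):

    import java.sql.SQLException;

    import org.apache.phoenix.schema.PColumn;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.util.IndexUtil;

    final class IndexColumnMapper {
        // Resolve the index column that stores the given data-table column; as in the
        // excerpt, the lookup throws (a SQLException subclass) if the index lacks it.
        static PColumn toIndexColumn(PTable index, PColumn dataColumn) throws SQLException {
            return index.getColumn(IndexUtil.getIndexColumnName(dataColumn));
        }
    }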

            projectedColumns.add(projector);
        }
    }
   
    private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
        PTable table = tableRef.getTable();
        PColumnFamily pfamily = table.getColumnFamily(cfName);
        for (PColumn column : pfamily.getColumns()) {
            ColumnRef ref = new ColumnRef(tableRef, column.getPosition());
            Expression expression = ref.newColumnExpression();
            projectedExpressions.add(expression);
            projectedColumns.add(new ExpressionProjector(column.getName().toString(), table.getName()
                    .getString(), expression, false));
        }
    }

                    .getString(), expression, false));
        }
    }

    private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
        PTable index = tableRef.getTable();
        PTable table = context.getConnection().getPMetaData().getTable(index.getParentName().getString());
        PColumnFamily pfamily = table.getColumnFamily(cfName);
        for (PColumn column : pfamily.getColumns()) {
            PColumn indexColumn = index.getColumn(IndexUtil.getIndexColumnName(column));
            ColumnRef ref = new ColumnRef(tableRef, indexColumn.getPosition());
            Expression expression = ref.newColumnExpression();
            projectedExpressions.add(expression);
            projectedColumns.add(new ExpressionProjector(column.getName().toString(),
                    table.getName().getString(), expression, false));
        }
    }

        List<AliasedNode> aliasedNodes = statement.getSelect();
        // Setup projected columns in Scan
        SelectClauseVisitor selectVisitor = new SelectClauseVisitor(context, groupBy);
        List<ExpressionProjector> projectedColumns = new ArrayList<ExpressionProjector>();
        TableRef tableRef = context.getResolver().getTables().get(0);
        PTable table = tableRef.getTable();
        boolean isWildcard = false;
        Scan scan = context.getScan();
        int index = 0;
        List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
        List<byte[]> projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
        for (AliasedNode aliasedNode : aliasedNodes) {
            ParseNode node = aliasedNode.getNode();
            // TODO: visitor?
            if (node instanceof WildcardParseNode) {
                if (statement.isAggregate()) {
                    ExpressionCompiler.throwNonAggExpressionInAggException(node.toString());
                }
                isWildcard = true;
                if (tableRef.getTable().getType() == PTableType.INDEX && ((WildcardParseNode)node).isRewrite()) {
                   projectAllIndexColumns(context, tableRef, projectedExpressions, projectedColumns);
                } else {
                    projectAllTableColumns(context, tableRef, projectedExpressions, projectedColumns);
                }
            } else if (node instanceof  FamilyWildcardParseNode){
                // Project everything for SELECT cf.*
                String cfName = ((FamilyWildcardParseNode) node).getName();
                // Delay adding the family projection to the scan: if any other column in the
                // same column family were later added to the scan, it would override the request
                // to project the entire family. Instead, we do the projection at the end.
                // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work
                // around this, as this code depends on this function being the last place where
                // columns are projected (which is currently true, but could change).
               projectedFamilies.add(Bytes.toBytes(cfName));
               if (tableRef.getTable().getType() == PTableType.INDEX && ((FamilyWildcardParseNode)node).isRewrite()) {
                   projectIndexColumnFamily(context, cfName, tableRef, projectedExpressions, projectedColumns);
                } else {
                    projectTableColumnFamily(context, cfName, tableRef, projectedExpressions, projectedColumns);
                }
            } else {
                Expression expression = node.accept(selectVisitor);
                projectedExpressions.add(expression);
                if (index < targetColumns.size()) {
                    PDatum targetColumn = targetColumns.get(index);
                    if (targetColumn.getDataType() != expression.getDataType()) {
                        PDataType targetType = targetColumn.getDataType();
                        // Check whether the coercion is allowed using the more relaxed isCastableTo check, since we
                        // promote INTEGER to LONG during expression evaluation and then convert back to INTEGER on
                        // UPSERT SELECT (and we don't have an actual value we can specifically check against).
                        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(targetType)) {
                            throw new ArgumentTypeMismatchException(targetType, expression.getDataType(), "column: " + targetColumn);
                        }
                        expression = CoerceExpression.create(expression, targetType);
                    }
                }
                if (node instanceof BindParseNode) {
                    context.getBindManager().addParamMetaData((BindParseNode)node, expression);
                }
                if (!node.isConstant()) {
                    if (!selectVisitor.isAggregate() && statement.isAggregate()) {
                        ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                    }
                }
                String columnAlias = aliasedNode.getAlias();
                boolean isCaseSensitive = aliasedNode.isCaseSensitve() || selectVisitor.isCaseSensitive;
                String name = columnAlias == null ? expression.toString() : columnAlias;
                projectedColumns.add(new ExpressionProjector(name, table.getName().getString(), expression, isCaseSensitive));
            }
            selectVisitor.reset();
            index++;
        }

        int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength();
        int estimatedByteSize = 0;
        for (Map.Entry<byte[],NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
            PColumnFamily family = table.getColumnFamily(entry.getKey());
            if (entry.getValue() == null) {
                for (PColumn column : family.getColumns()) {
                    Integer byteSize = column.getByteSize();
                    estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + (byteSize == null ? RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE : byteSize);
                }
            } else {
                for (byte[] cq : entry.getValue()) {
                    PColumn column = family.getColumn(cq);
                    Integer byteSize = column.getByteSize();
                    estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + (byteSize == null ? RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE : byteSize);
                }
            }
        }
       
        selectVisitor.compile();
        // Since we don't have the empty key value in read-only tables,
        // we must project everything.
        boolean isProjectEmptyKeyValue = table.getType() != PTableType.VIEW && !isWildcard;
        if (isProjectEmptyKeyValue) {
            for (byte[] family : projectedFamilies) {
                projectColumnFamily(table, scan, family);      
            }
        } else {
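The sizing loop above estimates the bytes returned per row by charging, for every projected cell, a fixed KeyValue overhead plus the estimated row-key length plus either the column's declared byte size or a default for variable-length types. Restated as a small helper over the values the excerpt reads from PColumn (the constants mirror the snippet; the class and method names are illustrative):

    import org.apache.phoenix.schema.PColumn;
    import org.apache.phoenix.schema.RowKeySchema;
    import org.apache.phoenix.util.SizedUtil;

    final class ScanSizeEstimator {
        // Estimated on-the-wire size of a single projected cell for one row.
        static int estimatedCellSize(PColumn column, int estimatedKeySize) {
            Integer byteSize = column.getByteSize();
            int valueSize = byteSize == null ? RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE : byteSize;
            return SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + valueSize;
        }
    }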

        }
        int nIndexes = 0;
        int estimatedSize = dataTable.getRowKeySchema().getEstimatedByteSize() + 2;
        while (indexes.hasNext()) {
            nIndexes++;
            PTable index = indexes.next();
            estimatedSize += index.getIndexMaintainer(dataTable).getEstimatedByteSize();
        }
        TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1);
        DataOutput output = new DataOutputStream(stream);
        try {
            // Encode data table salting in sign of number of indexes
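The serialization above needs to transmit both the number of indexes and whether the data table is salted, and the trailing comment notes that the salting flag is folded into the sign of the index count rather than written as a separate byte. A hedged sketch of that sign-encoding idea in isolation (an illustration of the trick, not the exact Phoenix wire format):

    final class SignEncoding {
        // Fold a boolean (is the data table salted?) into the sign of the index count.
        // Assumes nIndexes > 0, since the sign of zero cannot carry the flag.
        static int encode(int nIndexes, boolean isSalted) {
            return isSalted ? -nIndexes : nIndexes;
        }

        static int decodeIndexCount(int encoded) {
            return Math.abs(encoded);
        }

        static boolean decodeIsSalted(int encoded) {
            return encoded < 0;
        }
    }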
