Package org.voltdb.catalog

Examples of org.voltdb.catalog.Column
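The excerpts below all share one basic pattern: obtain a Column handle from an H-Store/VoltDB catalog Table and read its metadata. As a point of reference, here is a minimal sketch of that pattern. It uses only calls that appear in the excerpts themselves (getColumns().get(idx), getSize(), getNullable(), fullName()); the dumpColumns helper is illustrative and assumes a Table handle has already been looked up elsewhere.

import org.voltdb.catalog.Column;
import org.voltdb.catalog.Table;

public class ColumnSketch {
    // Print basic metadata for every column of a table, fetched by position
    // the same way the loader excerpt below does with getColumns().get(col_idx)
    static void dumpColumns(Table catalog_tbl) {
        int num_cols = catalog_tbl.getColumns().size();
        for (int col_idx = 0; col_idx < num_cols; col_idx++) {
            Column catalog_col = catalog_tbl.getColumns().get(col_idx);
            System.out.printf("%s size=%d nullable=%s%n",
                              catalog_col.fullName(),
                              catalog_col.getSize(),
                              catalog_col.getNullable());
        }
    }
}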


            } // SYNCH

            Object tuple[] = new Object[types.length];
            int row_idx = 0;
            for (int col_idx = 0; col_idx < types.length; col_idx++) {
                Column catalog_col = catalog_tbl.getColumns().get(col_idx);
                assert (catalog_col != null) : "The column at position " + col_idx + " for " + catalog_tbl + " is null";

                // Auto-generate first column
                if (col_idx == 0 && auto_generate_first_column) {
                    tuple[col_idx] = Long.valueOf(line_ctr);
                }
                // Null Values
                else if (row_idx >= row.length) {
                    tuple[col_idx] = null;
                }
                // Foreign Keys
                else if (fkeys[col_idx]) {
                    tuple[col_idx] = row[row_idx++];
                }
                // Timestamps
                else if (types[col_idx] == VoltType.TIMESTAMP) {
                    for (DateFormat f : timestamp_formats) {
                        try {
                            tuple[col_idx] = f.parse(row[row_idx]);
                        } catch (ParseException ex) {
                            // Ignore...
                        }
                        if (tuple[col_idx] != null)
                            break;
                    } // FOR
                    if (tuple[col_idx] == null) {
                        throw new RuntimeException("Line " + TableDataIterable.this.line_ctr + ": Invalid timestamp format '" + row[row_idx] + "' for " + catalog_col);
                    }
                    row_idx++;
                }
                // Store string (truncate if necessary)
                else if (types[col_idx] == VoltType.STRING) {
                    // Clip columns that are larger than our limit
                    int limit = catalog_col.getSize();
                    if (row[row_idx].length() > limit) {
                        if (!truncate_warnings.contains(catalog_col)) {
                            LOG.warn("Line " + TableDataIterable.this.line_ctr + ": Truncating data for " + catalog_col.fullName() + " because size " + row[row_idx].length() + " > " + limit);
                            truncate_warnings.add(catalog_col);
                        }
                        row[row_idx] = row[row_idx].substring(0, limit);
                    }
                    tuple[col_idx] = row[row_idx++];
View Full Code Here


        double total_memory_used = 0.0;
        boolean calculate_memory = (hints.force_replication_size_limit != null && hints.max_memory_per_partition != 0);
        for (Table catalog_tbl : CatalogUtil.getDataTables(info.catalogContext.database)) {
            String table_key = CatalogKey.createKey(catalog_tbl);
            TableEntry pentry = null;
            Column col = null;
            Collection<Column> pkey_columns = CatalogUtil.getPrimaryKeyColumns(catalog_tbl);

            TableStatistics ts = info.stats.getTableStatistics(catalog_tbl);
            assert (ts != null) : "Null TableStatistics for " + catalog_tbl;
            double size_ratio = (calculate_memory ? (ts.tuple_size_total / (double) hints.max_memory_per_partition) : 0);

            // Replication
            if (hints.force_replication.contains(table_key) || (calculate_memory && ts.readonly && size_ratio <= hints.force_replication_size_limit) || pkey_columns.isEmpty()) {
                total_memory_used += size_ratio;
                if (debug.val)
                    LOG.debug("Choosing " + catalog_tbl.getName() + " for replication");
                col = ReplicatedColumn.get(catalog_tbl);
                pentry = new TableEntry(PartitionMethodType.REPLICATION, col, null, null);

                // Hash Primary Key
            } else {
                total_memory_used += (size_ratio / (double) info.getNumPartitions());

                if (hints.enable_multi_partitioning == false || pkey_columns.size() == 1) {
                    col = CollectionUtil.first(pkey_columns);
                    pentry = new TableEntry(PartitionMethodType.HASH, col, null, null);
                } else {
                    col = MultiColumn.get(pkey_columns.toArray(new Column[0]));
                    pentry = new TableEntry(PartitionMethodType.HASH, col, null, null);
                }
                assert (pentry.attribute != null) : catalog_tbl;
            }
            if (debug.val)
                LOG.debug(String.format("[%02d] %s", pplan.getTableCount(), col.fullName()));
            pplan.getTableEntries().put(catalog_tbl, pentry);
        } // FOR
        assert (total_memory_used <= 100) : "Too much memory per partition: " + total_memory_used;

        if (hints.enable_procparameter_search) {
View Full Code Here
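The partitioning example above uses special Column subclasses as markers in the plan: ReplicatedColumn stands for "replicate the whole table", while MultiColumn bundles several primary-key columns into one composite partitioning attribute. A condensed, hedged sketch of that decision follows; the chooseColumn helper is illustrative, and imports for the H-Store helpers ReplicatedColumn, MultiColumn, CatalogUtil and CollectionUtil are omitted because the excerpt does not show their packages.

    // Pick a partitioning Column for a table, mirroring the replication-vs-hash
    // choice in the excerpt above (sketch only, not the planner's actual method).
    static Column chooseColumn(Table catalog_tbl, boolean replicate, boolean allow_multi) {
        if (replicate) {
            // Marker column meaning "replicate the whole table"
            return ReplicatedColumn.get(catalog_tbl);
        }
        Collection<Column> pkey_columns = CatalogUtil.getPrimaryKeyColumns(catalog_tbl);
        if (allow_multi == false || pkey_columns.size() == 1) {
            // Hash on the single (or first) primary-key column
            return CollectionUtil.first(pkey_columns);
        }
        // Composite attribute covering the whole primary key
        return MultiColumn.get(pkey_columns.toArray(new Column[0]));
    }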

            // ... to it and make a new edge between the parent and child
            DesignerVertex vertex = graph.getVertex(catalog_table);
            for (int ctr = 0, cnt = fkey_ref_consts.get(vertex).size(); ctr < cnt; ctr++) {
                Constraint catalog_const = fkey_ref_consts.get(vertex).get(ctr);
                Column catalog_col = fkey_ref_cols.get(vertex).get(ctr);

                //
                // Grab the table object used in this foreign key constraint
                // We then get the vertex that we're using to represent it
                //
View Full Code Here

                    // ... decreases every round until it's at the point
                    // where we won't select replication at all
                int idx = this.rand.nextInt((size * rounds) + (round < 0 ? 0 : round)) / rounds;

                Column catalog_col = null;
                if (idx == size) {
                    catalog_col = ReplicatedColumn.get(catalog_tbl);
                    if (calculate_memory)
                        total_memory += ts.tuple_size_total;
                } else {
View Full Code Here

        if (object.has(Members.COLUMN_STATS.name())) {
            JSONObject jsonObject = object.getJSONObject(Members.COLUMN_STATS.name());
            Iterator<String> col_keys = jsonObject.keys();
            while (col_keys.hasNext()) {
                String col_key = col_keys.next();
                Column catalog_col = CatalogKey.getFromKey(catalog_db, col_key, Column.class);
                ColumnStatistics col_stats = new ColumnStatistics(catalog_col);
                col_stats.fromJSONObject(jsonObject.getJSONObject(col_key), catalog_db);
                this.column_stats.put(col_key, col_stats);
            } // WHILE
        }
View Full Code Here
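The statistics example above resolves Column objects from string keys when reading the JSON back in; the inverse direction, CatalogKey.createKey(), appears in the partitioning example earlier (there applied to a Table). A minimal round-trip sketch, assuming createKey() accepts a Column the same way; the roundTrip helper name is illustrative and the CatalogKey import is omitted because the excerpts do not show its package.

    // Flatten a Column to a portable string key (e.g. for use as a JSON map key)
    // and resolve it back against the Database later.
    static Column roundTrip(Database catalog_db, Column catalog_col) {
        String col_key = CatalogKey.createKey(catalog_col);
        return CatalogKey.getFromKey(catalog_db, col_key, Column.class);
    }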

                    catalog_child = (T) ReplicatedColumn.get((Table) catalog_parent);

                    // SPECIAL CASE: VerticalPartitionColumn
                } else if (multiattribute_key != null && multiattribute_key.equalsIgnoreCase(VerticalPartitionColumn.PREFIX)) {
                    JSONArray jsonArray = jsonObject.getJSONArray(orig_parent_key);
                    Column params[] = new Column[jsonArray.length()];
                    for (int i = 0; i < params.length; i++) {
                        params[i] = getFromKey(catalog_db, jsonArray.getJSONObject(i), Column.class);
                    } // FOR
                    assert (params.length == 2) : "Invalid VerticalPartitionColumn Key: " + child_key;
                    catalog_child = (T) VerticalPartitionColumn.get(params[0], (MultiColumn) params[1]);

                    // SPECIAL CASE: MultiColumn
                } else if (multiattribute_key != null && multiattribute_key.equals(MultiColumn.PREFIX)) {
                    JSONArray jsonArray = jsonObject.getJSONArray(orig_parent_key);
                    Column params[] = new Column[jsonArray.length()];
                    for (int i = 0; i < params.length; i++) {
                        params[i] = getFromKey(catalog_db, jsonArray.getJSONObject(i), Column.class);
                        assert (params[i] != null) : "Invalid catalog key " + jsonArray.getJSONObject(i);
                    } // FOR
                    assert (params.length > 0) : "Invalid MultiColumn Key: " + child_key;
View Full Code Here

            }

            for (DesignerEdge e : this.agraph.getIncidentEdges(v)) {
                PredicatePairs cset = e.getAttribute(AccessGraph.EdgeAttributes.COLUMNSET);
                assert (cset != null);
                Column catalog_col = CollectionUtil.first(cset.findAllForParent(Column.class, catalog_tbl));
                Collection<Column> candidates = new HashSet<Column>();

                if (catalog_col == null) {
                    LOG.fatal("Failed to find column for " + catalog_tbl + " in ColumnSet:\n" + cset);
                    // Skip this edge rather than dereference a null column below
                    continue;
                }

                if (catalog_col.getNullable()) {
                    if (debug.val)
                        LOG.warn("Ignoring nullable horizontal partition column candidate " + catalog_col.fullName());
                } else {
                    // Always add the base column without any vertical
                    // partitioning
                    candidates.add(catalog_col);

                    // Maintain a reverse index from Columns to DesignerEdges
                    Collection<DesignerEdge> col_edges = this.column_edge_xref.get(catalog_col);
                    if (col_edges == null) {
                        col_edges = new HashSet<DesignerEdge>();
                        this.column_edge_xref.put(catalog_col, col_edges);
                    }
                    col_edges.add(e);

                    // Pre-generate all of the vertical partitions that we will
                    // need during the search
                    if (hints.enable_vertical_partitioning) {
                        Collection<VerticalPartitionColumn> vp_candidates = col_vps.get(catalog_col);
                        if (vp_candidates == null) {
                            try {
                                vp_candidates = VerticalPartitionerUtil.generateCandidates(catalog_col, info.stats);
                                col_vps.put(catalog_col, vp_candidates);
                            } catch (Throwable ex) {
                                LOG.warn("Failed to generate vertical partition candidates for " + catalog_col.fullName(), ex);
                            }
                        }
                        if (vp_candidates != null)
                            candidates.addAll(vp_candidates);
                    }
View Full Code Here

            // If this Table's partitioning column is set, then we can look at
            // the AccessGraph and remove any edges that don't use that column
            if (catalog_tbl.getSystable() == false && catalog_tbl.getIsreplicated() == false && catalog_tbl.getPartitioncolumn() != null) {
                Column catalog_col = catalog_tbl.getPartitioncolumn();
                assert (catalog_col != null);
                if (catalog_col instanceof VerticalPartitionColumn) {
                    VerticalPartitionColumn vp_col = (VerticalPartitionColumn) catalog_col;
                    catalog_col = vp_col.getHorizontalColumn();
                    assert (catalog_col != null);
View Full Code Here
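When a table is partitioned on a VerticalPartitionColumn, the excerpt above first unwraps it to the underlying horizontal column before comparing it against graph edges. That normalization step as a small sketch; the horizontalBase helper name is illustrative, and getHorizontalColumn() is taken from the excerpt above (the VerticalPartitionColumn import is omitted since its package is not shown).

    // Reduce any partitioning Column to its plain horizontal base column.
    static Column horizontalBase(Column catalog_col) {
        if (catalog_col instanceof VerticalPartitionColumn) {
            // A vertical partition wraps the "real" horizontal partitioning column
            return ((VerticalPartitionColumn) catalog_col).getHorizontalColumn();
        }
        return catalog_col;
    }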

            DesignerVertex v = this.agraph.getVertex(catalog_tbl);
            if (v == null) {
                throw new IllegalArgumentException("Missing vertex for " + catalog_tbl);
            }

            Column repcol = this.repcolumns.get(catalog_tbl);
            if (repcol != null) {
                ret.add((T) repcol);
                total_cols++;
            }
View Full Code Here

        this.preprocess((Database) catalog_col.getParent().getParent());
    }

    @Override
    public Column getCatalogItem(Database catalog_db) {
        Column ret = CatalogKey.getFromKey(catalog_db, this.catalog_key, Column.class);
        return (ret);
    }
View Full Code Here
