Package edu.brown.catalog.special

Examples of edu.brown.catalog.special.VerticalPartitionColumn
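
The snippets below all exercise the same lifecycle: generate candidate vertical partitions for a table's horizontal partitioning column, apply the pre-compiled optimized query plans, and (optionally) revert them. For orientation only, here is a minimal sketch of that lifecycle assembled from the calls that appear in the snippets; the fixtures it uses (clone_db, info.stats, the TM1 SUBSCRIBER table, and the getTable/getColumn test helpers) are assumed to come from the same test setup and are not shown here.

        // Minimal sketch: assumes the test fixtures used in the snippets below
        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");

        // 1. Generate candidate vertical partitions for the horizontal partitioning column
        Collection<VerticalPartitionColumn> candidates =
            VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assert(vpc.isUpdateApplied() == false);

        // 2. Apply the optimized query plans; this installs the vertical partition
        //    on the table as a MaterializedViewInfo
        MaterializedViewInfo catalog_view = vpc.applyUpdate();
        assert(catalog_view != null && vpc.isUpdateApplied());

        // 3. Inspect the Statements that were rewritten to use the vertical partition
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            System.err.println("Optimized: " + catalog_stmt.fullName());
        }

        // 4. Roll the catalog back to the original query plans
        vpc.revertUpdate();
        assert(vpc.isUpdateApplied() == false);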


        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);
        assertFalse(vpc.isUpdateApplied());

        // Create a filter that only has the procedures that will be optimized by our VerticalPartitionColumn
        ProcedureNameFilter filter = new ProcedureNameFilter(false);
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            filter.include(catalog_stmt.getParent().getName(), 1);
        } // FOR
       
        // Calculate the cost *BEFORE* applying the vertical partition optimization
        double expected_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("ORIGINAL COST: " + expected_cost);
       
        // Now apply the update and get the new cost. It should be lower
        // We have to clear the cache for these queries first though
        vpc.applyUpdate();
        costModel.invalidateCache(vpc.getOptimizedQueries());
        double new_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("NEW COST: " + new_cost);
        assert(new_cost < expected_cost) : String.format("%f < %f", new_cost, expected_cost);
    }
View Full Code Here


        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);
        assertFalse(vpc.isUpdateApplied());

        // Create a filter that only has the procedures that will be optimized by our VerticalPartitionColumn
        ProcedureNameFilter filter = new ProcedureNameFilter(false);
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            filter.include(catalog_stmt.getParent().getName(), 1);
        } // FOR
       
        // Calculate the cost *BEFORE* applying the vertical partition optimization
        double expected_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("ORIGINAL COST: " + expected_cost);
        Map<Long, TransactionCacheEntry> expected_entries = new HashMap<Long, TransactionCacheEntry>();
        for (TransactionCacheEntry txn_entry : costModel.getTransactionCacheEntries()) {
            // There should be no unknown queries and all transactions should be multi-sited
            assertEquals(txn_entry.toString(), 0, txn_entry.getUnknownQueryCount());
            assertFalse(txn_entry.isSinglePartitioned());
           
            TransactionCacheEntry clone = (TransactionCacheEntry)txn_entry.clone();
            assertNotSame(txn_entry, clone);
            expected_entries.put(txn_entry.getTransactionId(), clone);
            // System.err.println(StringUtil.columns(txn_entry.debug(), clone.debug()));
            // System.err.println(StringUtil.SINGLE_LINE);
        } // FOR
        assertFalse(expected_entries.isEmpty());
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            Collection<QueryCacheEntry> entries = costModel.getQueryCacheEntries(catalog_stmt);
            assertNotNull(entries);
            assertFalse(entries.isEmpty());
        } // FOR
       
        // Now apply the update and get the new cost. We don't care what the cost
        // is because SingleSitedCostModel only looks to see whether a txn is single-partition
        // and not how many partitions it actually touches
        // We have to clear the cache for these queries first though
        vpc.applyUpdate();
        costModel.invalidateCache(vpc.getOptimizedQueries());
        double new_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("NEW COST: " + new_cost);
        Collection<TransactionCacheEntry> new_entries = costModel.getTransactionCacheEntries();
        assertNotNull(new_entries);
        assertEquals(expected_entries.size(), new_entries.size());
View Full Code Here

        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);
        assertFalse(vpc.isUpdateApplied());
       
        // Get the original partitions for the queries before we apply the optimizations
        Map<Statement, Object[]> stmt_params = new HashMap<Statement, Object[]>();
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            // We first need to generate random input parameters
            Object params[] = new Object[catalog_stmt.getParameters().size()];
            for (int i = 0; i < params.length; i++) {
                StmtParameter catalog_param = catalog_stmt.getParameters().get(i);
                VoltType vtype = VoltType.get(catalog_param.getJavatype());
                params[i] = VoltTypeUtil.getRandomValue(vtype);
            } // FOR
            stmt_params.put(catalog_stmt, params);
           
            // Then get the list of partitions that it will access
            // This should always be *all* partitions
            PartitionSet partitions = new PartitionSet();
            p_estimator.getAllPartitions(partitions, catalog_stmt, params, base_partition);
            assertNotNull(partitions);
            assertEquals(CatalogUtil.getNumberOfPartitions(clone_db), partitions.size());
        } // FOR
       
        // Now apply the optimized queries
        // The number of partitions that our Statements touch should be reduced to one
        vpc.applyUpdate();
        assert(vpc.isUpdateApplied());
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            Object params[] = stmt_params.get(catalog_stmt);
            assertNotNull(params);
            PartitionSet partitions = new PartitionSet();
            p_estimator.getAllPartitions(partitions, catalog_stmt, params, base_partition);
            assertNotNull(partitions);
View Full Code Here

        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);
       
        // BEFORE!
        Map<Statement, Map<String, Object>> fields_before = new ListOrderedMap<Statement, Map<String, Object>>();
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            fields_before.put(catalog_stmt, this.generateFieldMap(catalog_stmt));
        } // FOR
//        System.err.println("BEFORE:\n" + StringUtil.formatMaps(fields_before));
       
        // AFTER!
        MaterializedViewInfo catalog_view = vpc.applyUpdate();
        assertNotNull(catalog_view);
        assertEquals(CatalogUtil.getVerticalPartition(catalog_tbl), catalog_view);
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            Map<String, Object> before_m = fields_before.get(catalog_stmt);
            assertNotNull(before_m);
            Map<String, Object> after_m = this.generateFieldMap(catalog_stmt);
            assertEquals(before_m.keySet(), after_m.keySet());
            //System.err.println(StringUtil.columns(StringUtil.formatMaps(before_m),
            //                   StringUtil.formatMaps(after_m)));
           
            for (String f : before_m.keySet()) {
                // Use the MD5 checksum to make sure that these fields have changed
                // Yes I could just compare the original strings but... well, uh... I forget why I did this...
                if (f.endsWith("fullplan")) {
                    assertThat(catalog_stmt.fullName() +" ["+f+"]", before_m.get(f), not(equalTo(after_m.get(f))));
                // Sometimes the Expression tree will be different, sometimes it will be the same
                // So just make sure it's not null/empty
                } else if (f.endsWith("exptree")) {
                    assertNotNull(after_m.get(f));
                    assertFalse(catalog_stmt.fullName() +" ["+f+"]", after_m.get(f).toString().isEmpty());
                // All the other fields should be the same except for secondaryindex + replicatedonly
                } else if (f.equals("secondaryindex") == false && f.equals("replicatedonly") == false) {
                    assertEquals(catalog_stmt.fullName() +" ["+f+"]", before_m.get(f), after_m.get(f));
                }
            } // FOR
        } // FOR
        System.err.println(StringUtil.SINGLE_LINE);
       
        // REVERT!
        vpc.revertUpdate();
        assertNull(CatalogUtil.getVerticalPartition(catalog_tbl));
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            Map<String, Object> before_m = fields_before.get(catalog_stmt);
            assertNotNull(before_m);
            Map<String, Object> revert_m = this.generateFieldMap(catalog_stmt);
            assertEquals(before_m.keySet(), revert_m.keySet());
            //System.err.println(StringUtil.columns(StringUtil.formatMaps(before_m),
View Full Code Here

        Column target_col = this.getColumn(catalog_tbl, "S_ID");
       
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);

        Collection<Column> expected_cols = CollectionUtil.addAll(new HashSet<Column>(), this.getColumn(catalog_tbl, "SUB_NBR"),
                                                                                        this.getColumn(catalog_tbl, "S_ID"));
        assertEquals(expected_cols.size(), vpc.getVerticalMultiColumn().size());
        assertTrue(expected_cols + " <=> " + vpc.getVerticalPartitionColumns(), expected_cols.containsAll(vpc.getVerticalPartitionColumns()));
       
        Collection<Statement> expected_stmts = new HashSet<Statement>();
        expected_stmts.add(this.getStatement(this.getProcedure(DeleteCallForwarding.class), "query"));
        expected_stmts.add(this.getStatement(this.getProcedure(InsertCallForwarding.class), "query1"));
        expected_stmts.add(this.getStatement(this.getProcedure(UpdateLocation.class), "getSubscriber"));
        assertEquals(expected_stmts.size(), vpc.getOptimizedQueries().size());
        assert(expected_stmts.containsAll(vpc.getOptimizedQueries()));
    }
View Full Code Here

    
     // Get the VerticalPartitionColumn that uses S_ID as the horizontal partitioning Column
     // Make sure that the optimized query plans have not been applied yet
     Collection<VerticalPartitionColumn> vp_columns = cp.getVerticalPartitionColumns(catalog_tbl0);
     assertNotNull(vp_columns);
     VerticalPartitionColumn vp_col = null;
     for (VerticalPartitionColumn c : vp_columns) {
         if (c.getHorizontalColumn().equals(catalog_col)) {
             vp_col = c;
             break;
         }
     } // FOR
     assertNotNull(vp_col);
     assertFalse(vp_col.isUpdateApplied());
    
     // Now partition SUBSCRIBER using its VerticalPartitionColumn
     cp.reset(catalog_tbl0);
     catalog_tbl0.setPartitioncolumn(vp_col);
     cp.update(catalog_tbl0);
    
     // And make sure that we get back the Columns we expect and that the Statements
     // have been updated
     Collection<Column> actual_cols = cp.getCandidateValues(catalog_tbl1, Column.class);
     assertNotNull(actual_cols);
     assertEquals(expected_cols.size(), actual_cols.size());
     assertEquals(expected_cols, actual_cols);
     assertFalse(vp_col.isUpdateApplied());
    
     // Revert
     catalog_tbl0.setPartitioncolumn(catalog_col);
     cp.reset(catalog_tbl0);
}
View Full Code Here

                    catalog_tbl.setPartitioncolumn(catalog_col);
                    catalog_tbl.setIsreplicated(false);
                }
                if (catalog_tbl.getPartitioncolumn() instanceof VerticalPartitionColumn) {
                    assert (enableVerticalPartitions) : "Unexpected " + catalog_tbl.getPartitioncolumn().fullName();
                    VerticalPartitionColumn vp_col = (VerticalPartitionColumn) catalog_tbl.getPartitioncolumn();
                    vp_cols.add(vp_col);
                }
            } else {
                if (debug)
                    LOG.warn("Missing PartitionEntry for " + catalog_tbl);
View Full Code Here

            }

            // Keep track of whether we have a VerticalPartitionColumn that
            // needs to be reset after each round
            VerticalPartitionColumn vp_col = null;

            // Get our workload filter for this level of the traversal
            Filter filter = BranchAndBoundPartitioner.this.traversal_filters.get(current);

            // Descendant tables used for memory calculations
            // It's ok for this to be empty; that means we're searching
            // against all of the tables
            Set<Table> current_previousTables = this.previous_tables[idx];
            assert (current_previousTables != null) : String.format("No previous tables at index %d? [current=%s, num_tables=%d]", idx, current, num_tables);

            // The local best vertex is the best vertex for this level in the
            // traversal
            StateVertex local_best_vertex = null;

            // Just make sure that if we have a VerticalPartitionColumn in our
            // list of current attributes, its optimizations are not applied.
            // If they are, then we revert them here.
            for (CatalogType attribute : current_attributes) {
                if (attribute instanceof VerticalPartitionColumn && ((VerticalPartitionColumn) attribute).isUpdateApplied()) {
                    ((VerticalPartitionColumn) attribute).revertUpdate();
                }
            } // FOR

            // Iterate through the columns and find the one with the best cost
            int attribute_ctr = 0;
            for (CatalogType attribute : current_attributes) {
                assert (attribute != null) : "Null attribute key for " + current + ": " + current_attributes;
                String attribute_key = CatalogKey.createKey(attribute);
                if (trace.val)
                    LOG.trace("Evaluating " + attribute.fullName());
                boolean memory_exceeded = false;
                Long memory = null;
                CatalogType current_attribute = null;

                // Is this the last element we have to look at?
                boolean last_attribute = (++attribute_ctr == num_attributes);

                // Dynamic Debugging
                this.cost_model.setDebuggingEnabled(this.hints.isDebuggingEnabled(attribute_key));

                // IMPORTANT: We have to invalidate the cache for the current
                // element *AND* all those levels below us in the search tree!
                if (this.cost_model.isCachingEnabled()) {
                    for (int i = idx; i < this.num_elements; i++) {
                        if (trace.val)
                            LOG.trace("Invalidating " + this.all_search_keys.get(i));
                        this.cost_model.invalidateCache(this.all_search_keys.get(i));
                    } // FOR
                    // If we're not using caching, then just clear out the cost
                    // model completely
                } else {
                    this.cost_model.clear();
                }

                // ----------------------------------------------
                // TABLE PARTITIONING KEY
                // ----------------------------------------------
                if (is_table) {
                    Table current_tbl = (Table) current;
                    Column search_col = (Column) attribute;
                    Column current_col = null;

                    // Check whether this is our replication marker column
                    if (search_col instanceof ReplicatedColumn) {
                        current_tbl.setIsreplicated(true);
                        current_col = ReplicatedColumn.get(current_tbl);
                    }
                    // VerticalPartitionColumn
                    else if (search_col instanceof VerticalPartitionColumn) {
                        // We need to update the statements that can use them
                        // using the pre-compiled query plans
                        current_tbl.setIsreplicated(false);
                        current_col = search_col;
                        vp_col = (VerticalPartitionColumn) search_col;
                        assert (CatalogUtil.getDatabase(vp_col).equals(info.catalogContext.database)) : String.format("VP_COL[%d] != INFO[%d]", CatalogUtil.getDatabase(vp_col).hashCode(),
                                info.catalogContext.database.hashCode());

                        MaterializedViewInfo catalog_view = vp_col.applyUpdate();
                        assert (catalog_view != null) : "Unexpected null MaterializedViewInfo for " + current_tbl + " vertical partition:\n" + vp_col;
                        if (this.cost_model.isCachingEnabled()) {
                            if (trace.val)
                                LOG.trace("Invalidating VerticalPartition Statements in cost model: " + vp_col.getOptimizedQueries());
                            this.cost_model.invalidateCache(vp_col.getOptimizedQueries());
                        }
                        TableStatistics tstats = VerticalPartitionerUtil.computeTableStatistics(vp_col, info.stats);
                        assert (tstats != null);
                        // Add the vp's sys table to the list of tables that we
                        // need to estimate the memory
View Full Code Here

                }
               
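                // If more than one column was collected for this Statement, combine them into
                // a MultiColumn and pair it with the horizontal partitioning column as a candidate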
                if (all_cols.size() > 1) {
                    MultiColumn vp_col = MultiColumn.get(all_cols.toArray(new Column[all_cols.size()]));
                    assert (partition_col.equals(vp_col) == false) : vp_col;
                    VerticalPartitionColumn vpc = VerticalPartitionColumn.get(partition_col, vp_col);
                    assert (vpc != null) : String.format("Failed to get VerticalPartition column for <%s, %s>", partition_col, vp_col);
                    candidates.add(vpc);
   
                    if (debug.val) {
                        Map<String, Object> m = new ListOrderedMap<String, Object>();
                        m.put("Output Columns", output_cols);
                        m.put("Predicate Columns", stmt_cols);
                        m.put("Horizontal Partitioning", partition_col.fullName());
                        m.put("Vertical Partitioning", vp_col.fullName());
                        LOG.debug("Vertical Partition Candidate: " + catalog_stmt.fullName() + "\n" + StringUtil.formatMaps(m));
                    }
                }
            } // FOR (stmt)
        } // FOR (proc)

        if (debug.val && candidates.size() > 0)
            LOG.debug("Computing vertical partition query plans for " + candidates.size() + " candidates");
        Set<VerticalPartitionColumn> final_candidates = new HashSet<VerticalPartitionColumn>();
        for (VerticalPartitionColumn vpc : candidates) {
            // Make sure our WorkloadStatistics have something for this
            // MaterializedViewInfo
            if (stats != null)
                VerticalPartitionerUtil.computeTableStatistics(vpc, stats);

            if (vpc.hasOptimizedQueries()) {
                if (debug.val)
                    LOG.debug("Skipping candidate that already has optimized queries\n" + vpc.toString());
                final_candidates.add(vpc);
            } else if (generateOptimizedQueries(catalog_db, vpc)) {
                final_candidates.add(vpc);
            }
        } // FOR
View Full Code Here

            // and remove any edges that don't use that column
            if (catalog_tbl.getSystable() == false && catalog_tbl.getIsreplicated() == false && catalog_tbl.getPartitioncolumn() != null) {
                Column catalog_col = catalog_tbl.getPartitioncolumn();
                assert (catalog_col != null);
                if (catalog_col instanceof VerticalPartitionColumn) {
                    VerticalPartitionColumn vp_col = (VerticalPartitionColumn) catalog_col;
                    catalog_col = vp_col.getHorizontalColumn();
                    assert (catalog_col != null);
                }

                DesignerVertex v0 = this.agraph.getVertex(catalog_tbl);
                if (v0 == null)
View Full Code Here
