Package edu.brown.workload

Examples of edu.brown.workload.Workload$WorkloadIterator
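The snippets below come from the H-Store test suite and exercise Workload's inner WorkloadIterator indirectly, through the enhanced for loop over a Workload's TransactionTrace elements (see the SMALLBANK setup snippet further down). A minimal sketch of that pattern, assuming only the edu.brown.workload classes that appear in the snippets and that they are on the classpath:

import edu.brown.workload.TransactionTrace;
import edu.brown.workload.Workload;

public class WorkloadIterationSketch {
    /** Count every query in a workload by walking its transaction traces. */
    public static int countQueries(Workload workload) {
        int total = 0;
        // The enhanced for loop goes through Workload's Iterable implementation,
        // i.e. the Workload$WorkloadIterator this page is about.
        for (TransactionTrace txn : workload) {
            total += txn.getQueryCount();
        }
        return total;
    }
}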


    /**
     * Generate a new DependencyGraph for the given catalog context.
     * @param catalogContext
     * @return the new DependencyGraph, or null if generation failed
     */
    public static DependencyGraph generate(CatalogContext catalogContext) {
        DependencyGraph dgraph = new DependencyGraph(catalogContext.database);
        DesignerInfo info = new DesignerInfo(catalogContext, new Workload(catalogContext.catalog));
        try {
            new DependencyGraphGenerator(info).generate(dgraph);
        } catch (Exception ex) {
            ex.printStackTrace();
            return (null);
        }
        return (dgraph);
    }
View Full Code Here


    /**
     * testWeightedTxnEstimation
     */
    public void testWeightedTxnEstimation() throws Exception {
        // Make a new workload that only has multiple copies of the same multi-partition transaction
        Workload new_workload = new Workload(catalogContext.catalog);
        int num_txns = 13;
        TransactionTrace multip_txn = this.getMultiPartitionTransaction();
        Procedure catalog_proc = multip_txn.getCatalogItem(catalogContext.database);
        for (int i = 0; i < num_txns; i++) {
            TransactionTrace clone = (TransactionTrace)multip_txn.clone();
            clone.setTransactionId(i);
            new_workload.addTransaction(catalog_proc, clone);
        } // FOR
        assertEquals(num_txns, new_workload.getTransactionCount());
       
        // We now want to calculate the cost of this new workload
        final SingleSitedCostModel orig_costModel = new SingleSitedCostModel(catalogContext);
        final double orig_cost = orig_costModel.estimateWorkloadCost(catalogContext, new_workload);
        assert(orig_cost > 0);
        // if (orig_costModel.getMultiPartitionProcedureHistogram().isEmpty()) System.err.println(orig_costModel.getTransactionCacheEntry(0).debug());
        assertEquals(num_txns, orig_costModel.getMultiPartitionProcedureHistogram().getSampleCount());
        assertEquals(0, orig_costModel.getSinglePartitionProcedureHistogram().getSampleCount());
       
        // The base partition should be touched (2 * num_txns) times. Every other
        // partition should be touched num_txns times.
        Integer base_partition = CollectionUtil.first(orig_costModel.getQueryPartitionAccessHistogram().getMaxCountValues());
        assertNotNull(base_partition);
        for (Integer p : orig_costModel.getQueryPartitionAccessHistogram().values()) {
            if (p.equals(base_partition)) {
                assertEquals(2 * num_txns, orig_costModel.getQueryPartitionAccessHistogram().get(p).intValue());
            } else {
                assertEquals(num_txns, orig_costModel.getQueryPartitionAccessHistogram().get(p).intValue());
            }
        } // FOR
       
        // Now make a new workload that has the same multi-partition transaction,
        // but this time there is only a single copy that carries a transaction weight.
        // We should get back the exact same cost.
        new_workload = new Workload(catalogContext.catalog);
        TransactionTrace clone = (TransactionTrace)multip_txn.clone();
        clone.setTransactionId(1000);
        clone.setWeight(num_txns);
        new_workload.addTransaction(catalog_proc, clone);
        final SingleSitedCostModel new_costModel = new SingleSitedCostModel(catalogContext);
        final double new_cost = new_costModel.estimateWorkloadCost(catalogContext, new_workload);
        assert(new_cost > 0);
        assertEquals(orig_cost, new_cost, 0.001);
       
View Full Code Here
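Condensed, the idiom this test relies on is that one TransactionTrace with setWeight(n) stands in for n identical transactions during cost estimation. A sketch under that assumption, using only the calls shown above; the class and helper names are hypothetical:

import org.voltdb.catalog.Catalog;
import org.voltdb.catalog.Procedure;
import edu.brown.workload.TransactionTrace;
import edu.brown.workload.Workload;

public class WeightedWorkloadSketch {
    /**
     * Build a workload holding one weighted copy of a transaction trace.
     * For cost estimation this stands in for `weight` unweighted clones.
     */
    public static Workload singleWeightedTxn(Catalog catalog, Procedure proc,
                                             TransactionTrace template, int weight) {
        Workload workload = new Workload(catalog);
        TransactionTrace weighted = (TransactionTrace) template.clone();
        weighted.setTransactionId(1000);  // arbitrary fresh id, as in the test above
        weighted.setWeight(weight);       // treated as `weight` copies by the cost model
        workload.addTransaction(proc, weighted);
        return workload;
    }
}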

    /**
     * testWeightedQueryEstimation
     */
    public void testWeightedQueryEstimation() throws Exception {
        // Make a new workload whose single transaction has each of its queries duplicated multiple times
        Workload new_workload = new Workload(catalogContext.catalog);
        int num_dupes = 7;
        TransactionTrace multip_txn = this.getMultiPartitionTransaction();
        Procedure catalog_proc = multip_txn.getCatalogItem(catalogContext.database);
       
        final TransactionTrace orig_txn = (TransactionTrace)multip_txn.clone();
        List<QueryTrace> clone_queries = new ArrayList<QueryTrace>();
        for (int i = 0; i < num_dupes; i++) {
            for (QueryTrace query_trace : multip_txn.getQueries()) {
                QueryTrace clone_query = (QueryTrace)query_trace.clone();
                clone_queries.add(clone_query);
            } // FOR
        } // FOR
        orig_txn.setQueries(clone_queries);
        new_workload.addTransaction(catalog_proc, orig_txn);
        assertEquals(1, new_workload.getTransactionCount());
        assertEquals(multip_txn.getQueryCount() * num_dupes, orig_txn.getQueryCount());
       
        // We now want to calculate the cost of this new workload
        final SingleSitedCostModel orig_costModel = new SingleSitedCostModel(catalogContext);
        final double orig_cost = orig_costModel.estimateWorkloadCost(catalogContext, new_workload);
        assert(orig_cost > 0);
        TransactionCacheEntry orig_txnEntry = orig_costModel.getTransactionCacheEntry(orig_txn);
        assertNotNull(orig_txnEntry);
        assertEquals(orig_txn.getQueryCount(), orig_txnEntry.getExaminedQueryCount());
//        System.err.println(orig_txnEntry.debug());
//        System.err.println("=========================================");
       
        // Now make a new workload that has the same multi-partition transaction,
        // but this time each query carries a weight instead of being duplicated.
        // We should get back the exact same cost.
        new_workload = new Workload(catalogContext.catalog);
        final TransactionTrace new_txn = (TransactionTrace)multip_txn.clone();
        clone_queries = new ArrayList<QueryTrace>();
        for (QueryTrace query_trace : multip_txn.getQueries()) {
            QueryTrace clone_query = (QueryTrace)query_trace.clone();
            clone_query.setWeight(num_dupes);
            clone_queries.add(clone_query);
        } // FOR
        new_txn.setQueries(clone_queries);
        new_workload.addTransaction(catalog_proc, new_txn);
        assertEquals(1, new_workload.getTransactionCount());
        assertEquals(multip_txn.getQueryCount(), new_txn.getQueryCount());
        assertEquals(multip_txn.getQueryCount() * num_dupes, new_txn.getWeightedQueryCount());
       
        final SingleSitedCostModel new_costModel = new SingleSitedCostModel(catalogContext);
        final double new_cost = new_costModel.estimateWorkloadCost(catalogContext, new_workload);
View Full Code Here
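The snippet is truncated just before its final checks; presumably they mirror testWeightedTxnEstimation above, asserting that the weighted-query cost equals the duplicated-query cost:

        // Presumed closing checks (truncated above), mirroring testWeightedTxnEstimation:
        // the weighted-query cost should equal the duplicated-query cost.
        assert(new_cost > 0);
        assertEquals(orig_cost, new_cost, 0.001);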

    /**
     * testWeightedTxnInvalidateCache
     */
    public void testWeightedTxnInvalidateCache() throws Throwable {
        // Make a new workload that only has a single weighted copy of our multi-partition transaction
        Workload new_workload = new Workload(catalogContext.catalog);
        int weight = 16;
        TransactionTrace multip_txn = this.getMultiPartitionTransaction();
        Procedure catalog_proc = multip_txn.getCatalogItem(catalogContext.database);
        TransactionTrace clone = (TransactionTrace)multip_txn.clone();
        clone.setTransactionId(1000);
        clone.setWeight(weight);
        new_workload.addTransaction(catalog_proc, clone);
        assertEquals(1, new_workload.getTransactionCount());
       
        SingleSitedCostModel cost_model = new SingleSitedCostModel(catalogContext);
        final double orig_cost = cost_model.estimateWorkloadCost(catalogContext, new_workload);
        assert(orig_cost > 0);
       
View Full Code Here
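The invalidation step itself is cut off here; it presumably follows the same pattern spelled out in testWeightedQueryInvalidateCache below:

        // Presumed continuation (truncated above), mirroring the query-weighted test below:
        // invalidate the cache for a table referenced by the procedure and check that the
        // cached multi-partition estimate is discarded.
        Statement catalog_stmt = CollectionUtil.first(catalog_proc.getStatements());
        Table catalog_tbl = CollectionUtil.first(CatalogUtil.getReferencedTables(catalog_stmt));
        cost_model.invalidateCache(catalog_tbl);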

    /**
     * testWeightedQueryInvalidateCache
     */
    public void testWeightedQueryInvalidateCache() throws Throwable {
        // Make a new workload that only has a single weighted copy of our multi-partition transaction
        Workload new_workload = new Workload(catalogContext.catalog);
        int weight = 6;
        TransactionTrace multip_txn = this.getMultiPartitionTransaction();
        Procedure catalog_proc = multip_txn.getCatalogItem(catalogContext.database);
        TransactionTrace clone = (TransactionTrace)multip_txn.clone();
        clone.setTransactionId(1000);
        for (QueryTrace qt : clone.getQueries()) {
            qt.setWeight(weight);
        }
        new_workload.addTransaction(catalog_proc, clone);
        assertEquals(1, new_workload.getTransactionCount());
        assertEquals(multip_txn.getQueryCount(), new_workload.getQueryCount());
       
        SingleSitedCostModel cost_model = new SingleSitedCostModel(catalogContext);
        final double orig_cost = cost_model.estimateWorkloadCost(catalogContext, new_workload);
        assert(orig_cost > 0);
        assertEquals(new_workload.getTransactionCount(), cost_model.getMultiPartitionProcedureHistogram().getSampleCount());
        assertEquals(0, cost_model.getSinglePartitionProcedureHistogram().getSampleCount());
       
        // Now invalidate the cache for the first query in the procedure
        Statement catalog_stmt = CollectionUtil.first(catalog_proc.getStatements());
        assertNotNull(catalog_stmt);
        Table catalog_tbl = CollectionUtil.first(CatalogUtil.getReferencedTables(catalog_stmt));
        assertNotNull(catalog_tbl);
        try {
            cost_model.invalidateCache(catalog_tbl);
        } catch (Throwable ex) {
            System.err.println(cost_model.debugHistograms(catalogContext));
            throw ex;
        }
        assertEquals(0, cost_model.getMultiPartitionProcedureHistogram().getSampleCount());
        assertEquals(new_workload.getTransactionCount(), cost_model.getSinglePartitionProcedureHistogram().getSampleCount());
        assertEquals(weight, cost_model.getQueryPartitionAccessHistogram().getSampleCount());
    }
View Full Code Here

        if (isFirstSetup()) {
            catalog_proc = this.getProcedure(TARGET_PROCEDURE);
            all_partitions = catalogContext.getAllPartitionIds();
           
            File file = this.getWorkloadFile(ProjectType.TPCC);
            workload = new Workload(catalogContext.catalog);

            // Check out this beauty:
            // (1) Filter by procedure name
            // (2) Filter to only include multi-partition txns
            // (3) Another limit to stop after allowing ### txns
View Full Code Here
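The filter chain promised by the comments above is cut off. A sketch of what it plausibly looks like, assembled from the filter classes used in the later TPC-C snippet on this page; p_estimator and WORKLOAD_XACT_LIMIT are that snippet's fields and are assumed to exist here as well:

            // Sketch only: the chain described by comments (1)-(3) above, assembled from
            // the filter classes shown in the later TPC-C snippet on this page.
            Filter filter = new ProcedureNameFilter(false)
                    .include(TARGET_PROCEDURE.getSimpleName())                // (1) by procedure name
                    .attach(new MultiPartitionTxnFilter(p_estimator, false))  // (2) multi-partition txns only
                    .attach(new ProcedureLimitFilter(WORKLOAD_XACT_LIMIT));   // (3) stop after a fixed number of txns
            workload.load(file, catalogContext.database, filter);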

        super.setUp(ProjectType.TPCC);
        this.addPartitions(NUM_PARTITIONS);
       
        if (workload == null) {
            File workload_file = this.getWorkloadFile(ProjectType.TPCC);
            workload = new Workload(catalogContext.catalog);
           
            workload.load(workload_file, catalogContext.database, new ProcedureLimitFilter(WORKLOAD_LIMIT));
            assertEquals(WORKLOAD_LIMIT, workload.getTransactionCount());
//            System.err.println(workload.getProcedureHistogram());
        }
View Full Code Here

       
        if (isFirstSetup()) {

            // DUPLICATE ALL SENDPAYMENTS TO BE NON-BLOCKING AND BLOCKABLE SENDPAYMENTS
            Procedure procs[] = { this.origProc, this.blockingProc, this.nonblockingProc };
            Workload workloads[] = new Workload[procs.length];
           
            // LOAD SAMPLE WORKLOAD
            Filter filter =  new ProcedureNameFilter(false)
                    .include(this.origProc.getName())
                    .attach(new NoAbortFilter())
                    .attach(new ProcedureLimitFilter(WORKLOAD_XACT_LIMIT));
            File workloadFile = this.getWorkloadFile(ProjectType.SMALLBANK);
            workloads[0] = new Workload(catalogContext.catalog).load(workloadFile, catalogContext.database, filter);
            File tempFile = FileUtil.getTempFile("workload", true);
            workloads[0].save(tempFile, catalogContext.database);
            assertTrue(tempFile.exists());
            String dump = FileUtil.readFile(tempFile);
            assertFalse(dump.isEmpty());
           
            for (int i = 1; i < procs.length; i++) {
                FileUtil.writeStringToFile(tempFile, dump.replace(this.origProc.getName(), procs[i].getName()));
                workloads[i] = new Workload(catalogContext.catalog).load(tempFile, catalogContext.database);
                assertEquals(workloads[0].getTransactionCount(), workloads[i].getTransactionCount());
                assertEquals(workloads[0].getQueryCount(), workloads[i].getQueryCount());
                // Make sure we change their txn ids
                for (TransactionTrace tt : workloads[i]) {
                    tt.setTransactionId(tt.getTransactionId() + (1000000 * i));
                } // FOR
           
                // DUPLICATE PARAMETER MAPPINGS
                for (ParameterMapping pm : catalogContext.paramMappings.get(this.origProc)) {
                    ParameterMapping clone = pm.clone();
                    clone.procedure_parameter = procs[i].getParameters().get(pm.procedure_parameter.getIndex());
                    clone.statement = procs[i].getStatements().get(pm.statement.getName());
                    clone.statement_parameter = clone.statement.getParameters().get(pm.statement_parameter.getIndex());
                    catalogContext.paramMappings.add(clone);
                } // FOR
                assert(workloads[i] != null) : i;
            } // FOR

            // COMBINE INTO A SINGLE WORKLOAD HANDLE
            workload = new Workload(catalogContext.catalog, workloads);
            assertEquals(workload.getTransactionCount(), workloads[0].getTransactionCount() * procs.length);
            assertEquals(workload.getQueryCount(), workloads[0].getQueryCount() * procs.length);
           
            // GENERATE MARKOV GRAPHS
            Map<Integer, MarkovGraphsContainer> markovs = MarkovGraphsContainerUtil.createMarkovGraphsContainers(
View Full Code Here

        if (isFirstSetup()) {
            catalog_proc = this.getProcedure(TARGET_PROCEDURE);
           
            File file = this.getWorkloadFile(ProjectType.TPCC);
            workload = new Workload(catalogContext.catalog);

            // Check out this beauty:
            // (1) Filter by procedure name
            // (2) Filter on partitions that start on our BASE_PARTITION
            // (3) Filter to only include multi-partition txns
            // (4) Another limit to stop after allowing ### txns
            // Where is your god now???
            Filter filter = new ProcedureNameFilter(false)
                    .include(TARGET_PROCEDURE.getSimpleName())
                    .attach(new ProcParameterValueFilter().include(1, new Integer(5))) // D_ID
                    // .attach(new ProcParameterArraySizeFilter(CatalogUtil.getArrayProcParameters(catalog_proc).get(0), 10, ExpressionType.COMPARE_EQUAL))
                    .attach(new BasePartitionTxnFilter(p_estimator, BASE_PARTITION))
                    .attach(new MultiPartitionTxnFilter(p_estimator, false))
                    .attach(new ProcedureLimitFilter(WORKLOAD_XACT_LIMIT));
            workload.load(file, catalogContext.database, filter);
           
            // Make a copy that doesn't have the first TransactionTrace
            Workload clone = new Workload(catalogContext.catalog, new Filter() {
                private boolean first = true;
                @Override
                protected FilterResult filter(AbstractTraceElement<? extends CatalogType> element) {
                    if (element instanceof TransactionTrace && first) {
                        this.first = false;
                        return (FilterResult.SKIP);
                    }
                    return FilterResult.ALLOW;
                }
                @Override
                public String debugImpl() { return null; }
                @Override
                protected void resetImpl() { }
            }, workload);
            TransactionTrace txn0 = CollectionUtil.first(workload.getTransactions());
            assertNotNull(txn0);
            TransactionTrace txn1 = CollectionUtil.first(clone.getTransactions());
            assertNotNull(txn1);
            assert(txn0.getTransactionId() != txn1.getTransactionId());
           
            // assertEquals(WORKLOAD_XACT_LIMIT, workload.getTransactionCount());
View Full Code Here

        super.setUp(ProjectType.TPCC);
        this.addPartitions(NUM_PARTITIONS);
       
        if (workload == null) {
            workload_file = this.getWorkloadFile(ProjectType.TPCC);
            workload = new Workload(catalog);
           
            ((Workload)workload).load(workload_file, catalog_db, new ProcedureLimitFilter(WORKLOAD_XACT_LIMIT));
            assert(workload.getTransactionCount() > 0) : "No transaction loaded from workload";
            assertEquals(WORKLOAD_XACT_LIMIT, workload.getTransactionCount());
        }
View Full Code Here
