}
// ----------------------------------------------------------------------------
// BASE PARTITION
// Compare the base partition predicted from the initial MarkovEstimate
// against the one the PartitionEstimator computed for the txn.
// ----------------------------------------------------------------------------
PartitionSet most_touched = initial_est.getMostTouchedPartitions(this.thresholds);
Integer e_base_partition = null;
// If multiple partitions tie for most-touched, break the tie randomly;
// otherwise take the single (first) entry. May be null if the set is empty.
if (most_touched.size() > 1) {
e_base_partition = CollectionUtil.random(most_touched);
} else {
e_base_partition = CollectionUtil.first(most_touched);
}
// NOTE(review): if base_partition is declared as Integer (not int), the '!='
// below compares object references rather than values and can misfire for
// ids outside the JVM Integer cache -- TODO confirm base_partition's type.
if (e_base_partition == null || e_base_partition != base_partition) {
if (trace.val) {
LOG.trace(String.format("Estimated base partition for txn #%d was %d but PartitionEstimator says it should be %d", s.getTransactionId(), e_base_partition, base_partition));
}
// Wrong base-partition prediction: record the penalty and keep going.
this.penalties.add(Penalty.WRONG_BASE_PARTITION);
// assert(false) : e_base_partition + " != " + base_partition + " "
// + most_touched;
}
// ----------------------------------------------------------------------------
// ABORTS
// If the transaction was predicted to be single-partitioned and we don't
// predict that it's going to abort when it actually did, then that's bad!
// Really bad!
// ----------------------------------------------------------------------------
first_penalty = true;
// Missed abort: the initial estimate claimed the txn could never abort, but
// the last vertex of the actual execution path is the abort vertex.
if (initial_est.isAbortable(this.thresholds) == false && a_last.isAbortVertex()) {
if (trace.val) {
if (first_penalty) {
LOG.trace("PENALTY: " + MarkovOptimization.OP3_ABORTS);
first_penalty = false;
}
LOG.trace(String.format("Txn #%d aborts but we predicted that it would never!", s.getTransactionId()));
}
// A missed abort for a single-partition txn gets a different (presumably
// harsher) penalty than for a multi-partition txn.
this.penalties.add(a_singlepartitioned ? Penalty.MISSED_ABORT_SINGLE : Penalty.MISSED_ABORT_MULTI);
}
// For each MarkovEstimate, check whether there is a path in the graph from
// the current vertex to the abort state. If there isn't, then check whether
// the estimate correctly told us to skip undo buffers for the read-only
// remainder of the txn.
// This should match ExecutionSite.executeLocalPlan()
MarkovVertex abort_v = markov.getAbortVertex();
boolean last_hadAbortPath = true;
first_penalty = true;
for (Estimate e : estimates) {
MarkovEstimate est = (MarkovEstimate)e;
assert(est.isInitialized()) : "Uninitialized MarkovEstimate from " + s;
MarkovVertex v = est.getVertex();
assert (v != null) : "No vertex?\n" + est;
boolean isAbortable = est.isAbortable(this.thresholds);
boolean isReadOnly = est.isReadOnlyPartition(this.thresholds, base_partition);
boolean hasAbortPath;
// Path lookup on the shared graph is guarded by the graph's monitor.
synchronized (markov) {
hasAbortPath = (markov.getPath(v, abort_v).isEmpty() == false);
} // SYNCH
// Sanity check: abort reachability should only ever shrink as we advance
// along the path. If the previous estimate had no abort path but this one
// does, export the graph for debugging and then fail -- the assert below
// is always false when this branch is reached; that is intentional.
if (hasAbortPath && last_hadAbortPath == false) {
LOG.info("MARKOV: " + MarkovUtil.exportGraphviz(markov, false, markov.getPath(v, abort_v)).writeToTempFile());
assert (last_hadAbortPath);
}
// If the path is not empty, then this txn could still abort
if (hasAbortPath)
continue;
// Otherwise check whether our estimate still says to go with undo
// buffers when we're going to be read-only for the rest of the
// transaction. This would be considered wasted work.
if (isAbortable && isReadOnly) {
if (trace.val) {
if (first_penalty) {
LOG.trace("PENALTY: " + MarkovOptimization.OP3_ABORTS);
first_penalty = false;
}
LOG.trace(String.format("Txn #%d will never abort but we failed to disable undo buffers!", s.getTransactionId()));
}
this.penalties.add(Penalty.MISSED_NO_UNDO_BUFFER);
}
// Only reached when this estimate had no abort path (the 'continue' above
// skips this line otherwise), so the flag stays false from here on.
last_hadAbortPath = false;
} // FOR
// ----------------------------------------------------------------------------
// MISSED PARTITIONS
// The transaction actually reads/writes at more partitions than it originally predicted
// This is expensive because it means that we have to abort+restart the txn
// ----------------------------------------------------------------------------
first_penalty = true;
// Penalize every partition that was actually read from but missing from the
// estimated read set.
for (Integer p : this.a_read_partitions) {
if (this.e_read_partitions.contains(p) == false) {
if (trace.val) {
if (first_penalty) {
LOG.trace("PENALTY: " + MarkovOptimization.OP2_PARTITIONS);
first_penalty = false;
}
LOG.trace(String.format("Txn #%d failed to predict that it was READING at partition %d", s.getTransactionId(), p));
}
this.penalties.add(Penalty.MISSED_READ_PARTITION);
}
} // FOR
// Same check for writes: actual write partitions not in the estimated set.
for (Integer p : this.a_write_partitions) {
if (this.e_write_partitions.contains(p) == false) {
if (trace.val) {
if (first_penalty) {
LOG.trace("PENALTY: " + MarkovOptimization.OP2_PARTITIONS);
first_penalty = false;
}
LOG.trace(String.format("Txn #%d failed to predict that it was WRITING at partition %d", s.getTransactionId(), p));
}
this.penalties.add(Penalty.MISSED_WRITE_PARTITION);
}
} // FOR
// if (this.penalties.size() > 0) {
// LOG.info("Estimated Read Partitions: " + this.e_read_partitions);
// LOG.info("Estimated Write Partitions: " + this.e_write_partitions);
// LOG.info("Actual Read Partitions: " + this.a_read_partitions);
// LOG.info("Actual Write Partitions: " + this.a_write_partitions);
//
// LOG.info("IS ABORTABLE: " +
// initial_est.isAbortable(this.thresholds));
// LOG.info("ABORT THRESHOLD: " + this.thresholds.getAbortThreshold());
// LOG.info("Current State\n" + actual.get(1).debug());
// LOG.info("MarkovEstimate\n" + initial_est.toString());
// // LOG.info("GRAPH: " + MarkovUtil.exportGraphviz(s.getMarkovGraph(),
// false, true, false, null).writeToTempFile());
// System.exit(1);
// }
// ----------------------------------------------------------------------------
// RETURN TO PARTITIONS
// We declared that we were done at a partition but then later we actually
// needed it. This can happen if there is a path that has a very low
// probability of us taking it, but we ended up taking it anyway.
//
// LATE FINISHED PARTITIONS
// We keep track of the last batch round that we finished with a partition.
// We then count how long it takes before we realize that we are finished.
// We declare that the MarkovEstimate was late if we don't mark it as finished
// immediately in the next batch
// ----------------------------------------------------------------------------
first_penalty = true;
boolean first_penalty5 = true;
// Partitions that some earlier estimate declared finished ("done").
this.done_partitions.clear();
// Index into 'actual' of the previous estimate's vertex; used as the start
// of the path segment examined for the next estimate.
int last_est_idx = 0;
// Working sets for the loop below; touched_partitions is presumably consumed
// further down (outside this chunk) -- TODO confirm.
PartitionSet touched_partitions = new PartitionSet();
PartitionSet new_touched_partitions = new PartitionSet();
// Reset the idle counters
this.idle_partition_ctrs.clear();
// Walk each MarkovEstimate in order, aligning it with the segment of the
// actual execution path between the previous estimate's vertex and this
// estimate's vertex. NOTE: this outer loop continues past the end of the
// visible chunk; its closing brace is not shown here.
for (int i = 0; i < num_estimates; i++) {
MarkovEstimate est = (MarkovEstimate)estimates.get(i);
MarkovVertex est_v = est.getVertex();
// Get the path of vertices
int start = last_est_idx;
int stop = actual.indexOf(est_v);
// So this is just a hack so that our test cases still work
if (stop == -1) {
LOG.warn("Failed to find MarkovVertex " + est_v + " in path!");
continue;
}
// Unreachable when stop == -1 because of the 'continue' above; kept as a
// redundant belt-and-suspenders check.
assert(stop != -1);
new_touched_partitions.clear();
for (; start <= stop; start++) {
MarkovVertex v = actual.get(start);
assert (v != null);
Statement catalog_stmt = v.getCatalogItem();
QueryType qtype = QueryType.get(catalog_stmt.getQuerytype());
// Reads vs. writes at a "done" partition carry different penalty types.
Penalty ptype = (qtype == QueryType.SELECT ? Penalty.RETURN_READ_PARTITION : Penalty.RETURN_WRITE_PARTITION);
for (Integer p : v.getPartitions()) {
// Check if we read/write at any partition that was
// previously declared as done
if (this.done_partitions.contains(p)) {
if (trace.val) {
if (first_penalty) {
LOG.trace("PENALTY: " + MarkovOptimization.OP4_FINISHED);
first_penalty = false;
}
LOG.trace(String.format("Txn #%d said that it was done at partition %d but it executed a %s", s.getTransactionId(), p, qtype.name()));
}
this.penalties.add(ptype);
// Un-mark the partition so each "return to partition" violation is
// penalized at most once until it is declared done again.
this.done_partitions.remove(p);
}
} // FOR
new_touched_partitions.addAll(v.getPartitions());
// For each partition that we don't touch here, we want to
// increase their idle counter
// NOTE(review): this increments the counter for ALL partition ids, not just
// untouched ones; the touched partitions are presumably compensated for
// later (outside this chunk) -- TODO confirm.
this.idle_partition_ctrs.put(this.catalogContext.getAllPartitionIds());
} // FOR