LOG.warn("Enabled Distributed Transaction Validation Checker");
}
// *********************************** DEBUG ***********************************
// Things that we will need in the loop below
InternalMessage nextWork = null;
AbstractTransaction nextTxn = null;
if (debug.val)
LOG.debug("Starting PartitionExecutor run loop...");
try {
while (this.shutdown_state == ShutdownState.STARTED) {
this.currentTxnId = null;
nextTxn = null;
nextWork = null;
// This is the starting state of the PartitionExecutor.
// At this point we don't have a txn to execute, nor are we
// involved in a distributed txn running at another partition.
// So we need to go to our PartitionLockQueue and get back the next
// txn that will hold our lock.
if (this.currentDtxn == null) {
this.tick();
if (hstore_conf.site.exec_profiling) profiler.poll_time.start();
try {
nextTxn = this.queueManager.checkLockQueue(this.partitionId); // NON-BLOCKING
} finally {
if (hstore_conf.site.exec_profiling) profiler.poll_time.stopIfStarted();
}
// If we get something back here, then it should become our current transaction.
if (nextTxn != null) {
// If it's a single-partition txn, then we can return the StartTxnMessage
// so that we can fire it off right away.
if (nextTxn.isPredictSinglePartition()) {
LocalTransaction localTxn = (LocalTransaction)nextTxn;
nextWork = localTxn.getStartTxnMessage();
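// If txn profiling is enabled, mark the point where this txn
// leaves the lock queue and begins execution.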
if (hstore_conf.site.txn_profiling && localTxn.profiler != null)
localTxn.profiler.startQueueExec();
}
// If it's a distributed txn, then we'll want to just set it as our
// current dtxn at this partition and then keep checking the queue
// for more work.
else {
this.setCurrentDtxn(nextTxn);
}
}
}
// -------------------------------
// Poll Work Queue
// -------------------------------
// Check if we have anything to do right now
if (nextWork == null) {
if (hstore_conf.site.exec_profiling) profiler.idle_time.start();
try {
// If we're allowed to speculatively execute txns, then we don't want to have
// to wait to see if anything will show up in our work queue.
if (hstore_conf.site.specexec_enable && this.lockQueue.approximateIsEmpty() == false) {
nextWork = this.work_queue.poll();
} else {
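// Otherwise, block on the work queue until a new message arrives
// or the poll timeout expires.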
nextWork = this.work_queue.poll(WORK_QUEUE_POLL_TIME, WORK_QUEUE_POLL_TIMEUNIT);
}
} catch (InterruptedException ex) {
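// We can be interrupted while blocked on the work queue (e.g., at shutdown),
// so loop back around and let the while condition re-check the shutdown state.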
continue;
} finally {
if (hstore_conf.site.exec_profiling) profiler.idle_time.stopIfStarted();
}
}
// -------------------------------
// Process Work
// -------------------------------
if (nextWork != null) {
if (trace.val) LOG.trace("Next Work: " + nextWork);
if (hstore_conf.site.exec_profiling) {
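// Count this message type and start timing its execution.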
profiler.numMessages.put(nextWork.getClass().getSimpleName());
profiler.exec_time.start();
if (this.currentDtxn != null) profiler.sp2_time.stopIfStarted();
}
try {
// -------------------------------