long undoToken = this.calculateNextUndoToken(ts, is_readonly);
ts.initRound(this.partitionId, undoToken);
ts.startRound(this.partitionId);
}
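// The outcome of executing this WorkFragment: the dependency results produced
// by the EE, the final status, and any error that we need to pass back to the
// transaction's base partition.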
DependencySet result = null;
Status status = Status.OK;
SerializableException error = null;
// Count how many fragments are *not* marked as ignored.
// If a fragment is marked as ignored, then it was already sent to this
// partition for prefetching, so we need to remove it from the list of
// fragmentIds that we execute here.
// NOTE: We iterate over the original fragment count; shrinking the loop
// bound while scanning would exit early and miscount trailing ignored entries.
final int totalFragmentCount = fragment.getFragmentIdCount();
int fragmentCount = totalFragmentCount;
for (int i = 0; i < totalFragmentCount; i++) {
if (fragment.getStmtIgnore(i)) {
fragmentCount--;
}
} // FOR
final ParameterSet parameters[] = tmp_fragmentParams.getParameterSet(fragmentCount);
assert(parameters.length == fragmentCount);
// Construct data given to the EE to execute this work fragment
this.tmp_EEdependencies.clear();
long fragmentIds[] = tmp_fragmentIds.getArray(fragmentCount);
int fragmentOffsets[] = tmp_fragmentOffsets.getArray(fragmentCount);
int outputDepIds[] = tmp_outputDepIds.getArray(fragmentCount);
int inputDepIds[] = tmp_inputDepIds.getArray(fragmentCount);
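// Compact the fragments that we actually need to execute into the front of
// these scratch arrays (the tmp_* buffers appear to be reused across calls
// to avoid per-invocation allocations), skipping anything marked as ignored.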
int offset = 0;
for (int i = 0; i < totalFragmentCount; i++) {
if (fragment.getStmtIgnore(i) == false) {
fragmentIds[offset] = fragment.getFragmentId(i);
fragmentOffsets[offset] = i;
outputDepIds[offset] = fragment.getOutputDepId(i);
inputDepIds[offset] = fragment.getInputDepId(i);
parameters[offset] = allParameters[fragment.getParamIndex(i)];
this.getFragmentInputs(ts, inputDepIds[offset], this.tmp_EEdependencies);
if (trace.val && ts.isSysProc() == false && is_basepartition == false)
LOG.trace(String.format("%s - Offset:%d FragmentId:%d OutputDep:%d/%d InputDep:%d/%d",
ts, offset, fragmentIds[offset],
outputDepIds[offset], fragment.getOutputDepId(i),
inputDepIds[offset], fragment.getInputDepId(i)));
offset++;
}
} // FOR
assert(offset == fragmentCount);
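// Execute the compacted batch in the EE using the last undo token that was
// acquired for this transaction at this partition.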
try {
result = this.executeFragmentIds(ts,
ts.getLastUndoToken(this.partitionId),
fragmentIds,
parameters,
outputDepIds,
inputDepIds,
this.tmp_EEdependencies);
} catch (EvictedTupleAccessException ex) {
// XXX: What do we do if this is not a single-partition txn?
status = Status.ABORT_EVICTEDACCESS;
error = ex;
} catch (ConstraintFailureException ex) {
if (debug.val) LOG.debug(String.format("%s - Aborting due to %s", ts, ex));
status = Status.ABORT_UNEXPECTED;
error = ex;
} catch (SQLException ex) {
if (debug.val) LOG.debug(String.format("%s - Aborting due to %s", ts, ex));
status = Status.ABORT_UNEXPECTED;
error = ex;
} catch (EEException ex) {
if (debug.val) LOG.debug(String.format("%s - Aborting due to %s", ts, ex));
status = Status.ABORT_UNEXPECTED;
error = ex;
} catch (Throwable ex) {
if (debug.val) LOG.debug(String.format("%s - Aborting due to unexpected %s", ts, ex));
status = Status.ABORT_UNEXPECTED;
if (ex instanceof SerializableException) {
error = (SerializableException)ex;
} else {
error = new SerializableException(ex);
}
} finally {
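// If we hit an error above, log it here. We also treat a null result with an
// OK status as an error, since a successful execution must produce results.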
if (error != null) {
LOG.warn(String.format("%s - Unexpected %s on partition %d",
ts, error.getClass().getSimpleName(), this.partitionId),
error);
}
// Success, but with no results? Treat it as an unexpected abort.
if (result == null && status == Status.OK) {
String msg = String.format("The WorkFragment %s executed successfully on Partition %d but " +
"the result is null for %s",
fragment.getFragmentIdList(), this.partitionId, ts);
Exception ex = new Exception(msg);
if (debug.val) LOG.warn(msg, ex);
status = Status.ABORT_UNEXPECTED;
error = new SerializableException(ex);
}
}
// For single-partition INSERT/UPDATE/DELETE queries, we don't directly
// execute the SendPlanNode in order to get back the number of tuples that
// were modified. So we have to rely on the output dependency ids set in the task
assert(status != Status.OK || result.size() == fragmentIds.length) :
"Got back " + result.size() + " results but was expecting " + fragmentIds.length;
// Make sure that we mark the round as finished before we start sending results
if (is_basepartition == false) {
ts.finishRound(this.partitionId);
}
// -------------------------------
// PREFETCH QUERIES
// -------------------------------
if (is_prefetch) {
// Regardless of whether this txn is running at the same HStoreSite as this PartitionExecutor,
// we always need to put the result in the local query cache. This lets us
// detect when we get a request for a query that we have already executed.
// We only cache the result if it succeeded. If it failed, then we won't do
// anything and will just wait until they come back to execute the query again
// before we tell them that something went wrong. It's crude, but it's just easier this way...
if (status == Status.OK) {
// We're going to store the result in the base partition cache if they're
// on the same HStoreSite as us
if (is_remote == false) {
PartitionExecutor other = this.hstore_site.getPartitionExecutor(ts.getBasePartition());
for (int i = 0, cnt = result.size(); i < cnt; i++) {
if (trace.val)
LOG.trace(String.format("%s - Storing %s prefetch result [params=%s]",
ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragment.getFragmentId(fragmentOffsets[i])).fullName(),
parameters[i]));
other.addPrefetchResult((LocalTransaction)ts,
fragment.getStmtCounter(fragmentOffsets[i]),
fragment.getFragmentId(fragmentOffsets[i]),
this.partitionId,
parameters[i].hashCode(),
result.dependencies[i]);
} // FOR
}
}
// Now if it's a remote transaction, we need to use the coordinator to send
// them our result. Note that we want to send a single message per partition. Unlike
// with the TransactionWorkRequests, we don't need to wait for all of the partitions
// that are prefetching for this txn at our local HStoreSite to finish.
if (is_remote) {
WorkResult wr = this.buildWorkResult(ts, result, status, error);
TransactionPrefetchResult.Builder builder = TransactionPrefetchResult.newBuilder()
.setTransactionId(ts.getTransactionId().longValue())
.setSourcePartition(this.partitionId)
.setResult(wr)
.setStatus(status)
.addAllFragmentId(fragment.getFragmentIdList())
.addAllStmtCounter(fragment.getStmtCounterList());
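// Attach the hash of each executed query's ParameterSet so that the base
// partition can match future requests against this cached prefetch result.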
// The parameters array only covers the fragments that were actually executed
// (those not marked as ignored), so bound the loop by its length.
for (int i = 0; i < parameters.length; i++) {
builder.addParamHash(parameters[i].hashCode());
}
if (debug.val)
LOG.debug(String.format("%s - Sending back %s to partition %d [numResults=%s, status=%s]",
ts, wr.getClass().getSimpleName(), ts.getBasePartition(),
result.size(), status));
hstore_coordinator.transactionPrefetchResult((RemoteTransaction)ts, builder.build());
}
}
// -------------------------------
// LOCAL TRANSACTION
// -------------------------------
else if (is_remote == false) {
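// The txn's base partition is at this HStoreSite, so it should be safe to downcast.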
LocalTransaction local_ts = (LocalTransaction)ts;
// If the transaction is local, store the result directly in the local TransactionState
if (status == Status.OK) {
if (trace.val)
LOG.trace(String.format("%s - Storing %d dependency results locally for successful work fragment",
ts, result.size()));
assert(result.size() == outputDepIds.length);
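// Hand each output dependency to the DependencyTracker at the txn's base
// partition, which collects the results coming back from every partition.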
DependencyTracker otherTracker = this.hstore_site.getDependencyTracker(ts.getBasePartition());
for (int i = 0; i < outputDepIds.length; i++) {
if (trace.val)
LOG.trace(String.format("%s - Storing DependencyId #%d [numRows=%d]\n%s",
ts, outputDepIds[i], result.dependencies[i].getRowCount(),
result.dependencies[i]));
try {
otherTracker.addResult(local_ts, this.partitionId, outputDepIds[i], result.dependencies[i]);
} catch (Throwable ex) {
String msg = String.format("Failed to store Dependency #%d for %s [idx=%d, fragmentId=%d]",
outputDepIds[i], ts, i, fragmentIds[i]);
LOG.error(String.format("%s - WorkFragment:%d\nExpectedIds:%s\nOutputDepIds: %s\nResultDepIds: %s\n%s",
msg, fragment.hashCode(),
fragment.getOutputDepIdList(), Arrays.toString(outputDepIds),
Arrays.toString(result.depIds), fragment));
throw new ServerFaultException(msg, ex);
}
} // FOR
} else {
local_ts.setPendingError(error, true);
}
}
// -------------------------------
// REMOTE TRANSACTION
// -------------------------------
else {
if (trace.val)
LOG.trace(String.format("%s - Constructing WorkResult with %d bytes from partition %d to send " +
"back to initial partition %d [status=%s]",
ts, (result != null ? result.size() : null),
this.partitionId, ts.getBasePartition(), status));
RpcCallback<WorkResult> callback = ((RemoteTransaction)ts).getWorkCallback();
if (callback == null) {
LOG.fatal("Unable to send FragmentResponseMessage for " + ts);