// Now if it's a remote transaction, we need to use the coordinator to send
// them our result. Note that we want to send a single message per partition. Unlike
// with the TransactionWorkRequests, we don't need to wait until all of the partitions
// that are prefetching for this txn at our local HStoreSite to finish.
if (is_remote) {
// Wrap the raw result (plus status/error) into a WorkResult protobuf message.
WorkResult wr = this.buildWorkResult(ts, result, status, error);
// Build the prefetch-result envelope: which txn, which partition produced it,
// and the fragment/statement-counter ids so the receiver can match it up.
TransactionPrefetchResult.Builder builder = TransactionPrefetchResult.newBuilder()
.setTransactionId(ts.getTransactionId().longValue())
.setSourcePartition(this.partitionId)
.setResult(wr)
.setStatus(status)
.addAllFragmentId(fragment.getFragmentIdList())
.addAllStmtCounter(fragment.getStmtCounterList());
// Attach one parameter-set hash per fragment id.
// NOTE(review): assumes parameters[] is index-aligned with the fragment id
// list (same length, same order) — confirm against the caller.
for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
builder.addParamHash(parameters[i].hashCode());
}
if (debug.val)
LOG.debug(String.format("%s - Sending back %s to partition %d [numResults=%s, status=%s]",
ts, wr.getClass().getSimpleName(), ts.getBasePartition(),
result.size(), status));
// Ship the prefetch result back to the txn's base partition via the coordinator.
hstore_coordinator.transactionPrefetchResult((RemoteTransaction)ts, builder.build());
}
}
// -------------------------------
// LOCAL TRANSACTION
// -------------------------------
else if (!is_remote) {
LocalTransaction local_ts = (LocalTransaction)ts;
// If the transaction is local, store the result directly in the local TransactionState
if (status == Status.OK) {
if (trace.val)
LOG.trace(String.format("%s - Storing %d dependency results locally for successful work fragment",
ts, result.size()));
// Each produced dependency must line up one-to-one with an expected output dep id.
assert(result.size() == outputDepIds.length);
// The dependency tracker lives at the txn's base partition, which may differ
// from this.partitionId when we executed a fragment on another partition's behalf.
DependencyTracker otherTracker = this.hstore_site.getDependencyTracker(ts.getBasePartition());
for (int i = 0; i < outputDepIds.length; i++) {
if (trace.val)
LOG.trace(String.format("%s - Storing DependencyId #%d [numRows=%d]\n%s",
ts, outputDepIds[i], result.dependencies[i].getRowCount(),
result.dependencies[i]));
try {
otherTracker.addResult(local_ts, this.partitionId, outputDepIds[i], result.dependencies[i]);
} catch (Throwable ex) {
// ex.printStackTrace();
// Fixed typo in the error message ("stored" -> "store").
String msg = String.format("Failed to store Dependency #%d for %s [idx=%d, fragmentId=%d]",
outputDepIds[i], ts, i, fragmentIds[i]);
// Dump everything we know about the mismatch before escalating to a server fault.
LOG.error(String.format("%s - WorkFragment:%d\nExpectedIds:%s\nOutputDepIds: %s\nResultDepIds: %s\n%s",
msg, fragment.hashCode(),
fragment.getOutputDepIdList(), Arrays.toString(outputDepIds),
Arrays.toString(result.depIds), fragment));
// Preserve the original cause so the full stack is available upstream.
throw new ServerFaultException(msg, ex);
}
} // FOR
} else {
// Non-OK status: record the error on the txn; the second argument
// presumably means "wake up anyone blocked on this txn" — confirm
// against LocalTransaction.setPendingError().
local_ts.setPendingError(error, true);
}
}
// -------------------------------
// REMOTE TRANSACTION
// -------------------------------
else {
// NOTE(review): the "%d bytes" label looks wrong — result.size() appears to be
// a result *count*, not a byte size (see the numResults usage above) — confirm.
if (trace.val)
LOG.trace(String.format("%s - Constructing WorkResult with %d bytes from partition %d to send " +
"back to initial partition %d [status=%s]",
ts, (result != null ? result.size() : null),
this.partitionId, ts.getBasePartition(), status));
// The callback routes the WorkResult back to the HStoreSite that sent us this fragment.
RpcCallback<WorkResult> callback = ((RemoteTransaction)ts).getWorkCallback();
if (callback == null) {
// Without a callback there is no way to deliver the result; this is fatal
// for the txn. Fixed typo in the log message ("Orignal" -> "Original").
LOG.fatal("Unable to send FragmentResponseMessage for " + ts);
LOG.fatal("Original WorkFragment:\n" + fragment);
LOG.fatal(ts.toString());
throw new ServerFaultException("No RPC callback to HStoreSite for " + ts, ts.getTransactionId());
}
WorkResult response = this.buildWorkResult((RemoteTransaction)ts, result, status, error);
assert(response != null);
// Hand the result to the RPC layer for delivery to the base partition's site.
callback.run(response);
}
// Check whether this is the last query that we're going to get