// NOTE(review): partial view of a larger switch over the upstream batch outcome —
// the enclosing method and the start of this first case are outside this chunk.
// Tail of a failure/stop case: finish building a QueryResult header that carries
// the logged error and marks the response stream as finished.
.addError(ErrorHelper.logAndConvertError(context.getIdentity(), "Screen received stop request sent.",
context.getFailureCause(), logger, verbose))
.setDef(RecordBatchDef.getDefaultInstance()) //
.setIsLastChunk(true) //
.build();
QueryWritableBatch batch = new QueryWritableBatch(header);
// Bracket the (possibly blocking) send in wait-time stats so stopWait() runs
// even if sendResult throws.
stats.startWait();
try {
connection.sendResult(listener, batch);
} finally {
stats.stopWait();
}
// Track the in-flight send; presumably balanced by a decrement in the send
// listener — TODO confirm against the listener implementation.
sendCount.increment();
// false signals no more batches will be produced by this operator.
return false;
}
case NONE: {
// Upstream is exhausted: block until all previously issued sends have
// completed before emitting the terminal (last-chunk) message.
sendCount.waitForSendComplete();
// context.getStats().batchesCompleted.inc(1);
QueryWritableBatch batch;
if (!first) {
// At least one data batch was already sent; a header-only last-chunk
// message with zero rows is enough to close the stream.
QueryResult header = QueryResult.newBuilder() //
.setQueryId(context.getHandle().getQueryId()) //
.setRowCount(0) //
.setDef(RecordBatchDef.getDefaultInstance()) //
.setIsLastChunk(true) //
.build();
batch = new QueryWritableBatch(header);
} else {
// No data was ever sent: include the (empty) schema so the client still
// learns the result shape of the query.
batch = QueryWritableBatch.getEmptyBatchWithSchema(context.getHandle().getQueryId(), 0, true, incoming.getSchema());
}
// Same wait-stats bracket as above around the network send.
stats.startWait();
try {
connection.sendResult(listener, batch);
} finally {
stats.stopWait();
}
sendCount.increment();
return false;
}
case OK_NEW_SCHEMA:
// Schema changed: rebuild the materializer against the new incoming
// vectors, then handle the batch exactly like a plain OK.
materializer = new VectorRecordMaterializer(context, incoming);
// fall through.
case OK:
// context.getStats().batchesCompleted.inc(1);
// context.getStats().recordsCompleted.inc(incoming.getRecordCount());
// Convert the current record batch into its wire form; false presumably
// means "not the last chunk" — TODO confirm against convertNext's contract.
QueryWritableBatch batch = materializer.convertNext(false);
updateStats(batch);
stats.startWait();
try {
connection.sendResult(listener, batch);
} finally {