// allocate the incoming record batch loaders
senderCount = rawBatches.size();
if (senderCount == 0) {
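// No batches have arrived from any sender. On the first call, expose the schema
// carried by the provided empty batch so downstream operators still observe
// OK_NEW_SCHEMA; on later calls simply report that no data remains.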
if (firstBatch) {
RecordBatchLoader loader = new RecordBatchLoader(oContext.getAllocator());
try {
loader.load(emptyBatch.getHeader().getDef(), emptyBatch.getBody());
} catch (SchemaChangeException e) {
throw new RuntimeException(e);
}
for (VectorWrapper<?> w : loader) {
outgoingContainer.add(w.getValueVector());
}
outgoingContainer.buildSchema(SelectionVectorMode.NONE);
done = true;
return IterOutcome.OK_NEW_SCHEMA;
}
return IterOutcome.NONE;
}
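// Set up per-sender state: the raw batch received from each sender, an offset
// counter, and a loader to deserialize each batch body into value vectors.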
incomingBatches = new RawFragmentBatch[senderCount];
batchOffsets = new int[senderCount];
batchLoaders = new RecordBatchLoader[senderCount];
for (int i = 0; i < senderCount; ++i) {
incomingBatches[i] = rawBatches.get(i);
batchLoaders[i] = new RecordBatchLoader(oContext.getAllocator());
}
int i = 0;
for (RawFragmentBatch batch : incomingBatches) {
// load this sender's batch into its loader
UserBitShared.RecordBatchDef rbd = batch.getHeader().getDef();
try {
batchLoaders[i].load(rbd, batch.getBody());
} catch (SchemaChangeException e) {
logger.error("MergingReceiver failed to load record batch from remote host.", e);
context.fail(e);
return IterOutcome.STOP;
}
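// Release the raw batch now that its contents have been loaded into the loader.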
batch.release();
++batchOffsets[i];
++i;
}
// Canonicalize each incoming batch, so that vectors are sorted alphabetically by SchemaPath.
for (RecordBatchLoader loader : batchLoaders) {
loader.canonicalize();
}
// Ensure all the incoming batches have identical schemas.
if (!isSameSchemaAmongBatches(batchLoaders)) {
logger.error("Incoming batches for merging receiver have diffferent schemas!");