// Tail of the writer-roll branch above: record the roll time so the next
// roll interval is measured from now.
this.lastRolledMs = System.currentTimeMillis();
LOG.info("Rolled writer for " + getName());
}
// NOTE(review): this looks like the body of a Flume Sink#process()
// implementation (signature is above this chunk) — confirm against the
// enclosing class. Drains up to batchSize events from the channel inside a
// single channel transaction and writes them through the dataset writer.
Channel channel = getChannel();
Transaction transaction = null;
try {
// Count of events actually drained in this batch; drives the
// empty/underflow/complete counters below.
long processedEvents = 0;
transaction = channel.getTransaction();
transaction.begin();
for (; processedEvents < batchSize; processedEvents += 1) {
Event event = channel.take();
if (event == null) {
// no events available in the channel
break;
}
// Reuse the previous datum as a scratch record when reuseDatum is set,
// avoiding a fresh allocation per event.
this.datum = deserialize(event, reuseDatum ? datum : null);
// writeEncoded would be an optimization in some cases, but HBase
// will not support it and partitioned Datasets need to get partition
// info from the entity Object. We may be able to avoid the
// serialization round-trip otherwise.
writer.write(datum);
}
// TODO: Add option to sync, depends on CDK-203
writer.flush();
// commit after data has been written and flushed
transaction.commit();
if (processedEvents == 0) {
// Nothing to drain: tell the sink runner to back off before re-polling.
counter.incrementBatchEmptyCount();
return Status.BACKOFF;
} else if (processedEvents < batchSize) {
counter.incrementBatchUnderflowCount();
} else {
counter.incrementBatchCompleteCount();
}
counter.addToEventDrainSuccessCount(processedEvents);
return Status.READY;
} catch (Throwable th) {
// catch-all for any unhandled Throwable so that the transaction is
// correctly rolled back.
if (transaction != null) {
try {
transaction.rollback();
} catch (Exception ex) {
LOG.error("Transaction rollback failed", ex);
throw Throwables.propagate(ex);
}
}
// close the writer and remove its reference so a fresh writer is opened
// on the next process() call (lastRolledMs reset marks this as a roll).
// NOTE(review): writer.close() here can itself throw, which would mask the
// original Throwable th; it could also NPE if writer is already null.
// Consider a guarded/try-wrapped close — flagged only, behavior unchanged.
writer.close();
this.writer = null;
this.lastRolledMs = System.currentTimeMillis();
// handle the exception: rethrow Errors and EventDeliveryExceptions as-is,
// wrap everything else in an EventDeliveryException for the caller.
Throwables.propagateIfInstanceOf(th, Error.class);
Throwables.propagateIfInstanceOf(th, EventDeliveryException.class);
throw new EventDeliveryException(th);
} finally {
// Always release the transaction, whether it was committed or rolled back.
if (transaction != null) {
transaction.close();
}
}
}