* HDFS. <br/>
* This method is not thread safe.
*/
public Status process() throws EventDeliveryException {
  // Drain up to batchSize events from the channel inside a single channel
  // transaction, routing each event to a (cached) BucketWriter keyed by its
  // escaped HDFS path, then flush all touched buckets before committing.
  // NOTE: per the Javadoc above, this method is not thread safe.
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();
  // Buckets written during THIS transaction only; flushed before commit so
  // data is durable in HDFS before the channel forgets the events.
  List<BucketWriter> writers = Lists.newArrayList();
  transaction.begin();
  try {
    int txnEventCount = 0;
    for (txnEventCount = 0; txnEventCount < batchSize; txnEventCount++) {
      Event event = channel.take();
      if (event == null) {
        // Channel is empty — stop early with a partial (or empty) batch.
        break;
      }
      // reconstruct the path name by substituting place holders
      String realPath = BucketPath.escapeString(path, event.getHeaders(),
          timeZone, needRounding, roundUnit, roundValue);
      BucketWriter bucketWriter = sfWriters.get(realPath);
      // we haven't seen this file yet, so open it and cache the handle
      if (bucketWriter == null) {
        HDFSWriter hdfsWriter = writerFactory.getWriter(fileType);
        FlumeFormatter formatter = HDFSFormatterFactory
            .getFormatter(writeFormat);
        WriterCallback idleCallback = null;
        if(idleTimeout != 0) {
          // When a bucket goes idle, evict it from the cache so a later
          // event for the same path opens a fresh writer.
          // NOTE(review): bucketPath is assumed to equal the realPath used
          // as the sfWriters key — confirm against BucketWriter's callback.
          idleCallback = new WriterCallback() {
            @Override
            public void run(String bucketPath) {
              sfWriters.remove(bucketPath);
            }
          };
        }
        bucketWriter = new BucketWriter(rollInterval, rollSize, rollCount,
            batchSize, context, realPath, suffix, codeC, compType, hdfsWriter,
            formatter, timedRollerPool, proxyTicket, sinkCounter, idleTimeout,
            idleCallback);
        sfWriters.put(realPath, bucketWriter);
      }
      // track the buckets getting written in this transaction
      if (!writers.contains(bucketWriter)) {
        writers.add(bucketWriter);
      }
      // Write the data to HDFS
      append(bucketWriter, event);
    }
    // Update batch-size counters: empty / full / partial batch.
    if (txnEventCount == 0) {
      sinkCounter.incrementBatchEmptyCount();
    } else if (txnEventCount == batchSize) {
      sinkCounter.incrementBatchCompleteCount();
    } else {
      sinkCounter.incrementBatchUnderflowCount();
    }
    // flush all pending buckets before committing the transaction
    for (BucketWriter bucketWriter : writers) {
      flush(bucketWriter);
    }
    transaction.commit();
    if (txnEventCount < 1) {
      // Nothing taken this round — tell the runner to back off briefly.
      return Status.BACKOFF;
    } else {
      sinkCounter.addToEventDrainSuccessCount(txnEventCount);
      return Status.READY;
    }
  } catch (IOException eIO) {
    // HDFS write/flush failure: roll back so the channel redelivers the
    // events, and back off rather than failing the sink.
    transaction.rollback();
    LOG.warn("HDFS IO error", eIO);
    return Status.BACKOFF;
  } catch (Throwable th) {
    transaction.rollback();
    LOG.error("process failed", th);
    if (th instanceof Error) {
      // Errors (e.g. OutOfMemoryError) are rethrown as-is; everything else
      // is wrapped in the checked exception callers expect.
      throw (Error) th;
    } else {
      throw new EventDeliveryException(th);
    }
  } finally {
    // Always release the transaction, whether committed or rolled back.
    transaction.close();
  }
}