// Reads up to maxEvents log events from a single Kafka partition, walking BACKWARD from
// fromOffset (or from the latest offset when fromOffset < 0), filtering them, and feeding
// matches to the callback. Intended for "get previous page of logs" style reads.
public void run() {
// Map this logging context onto one of the topic's partitions.
int partition = partitioner.partition(loggingContext.getLogPartition(), numPartitions);
callback.init();
// NOTE(review): callback.init() and this constructor run outside the try/finally below —
// if the KafkaConsumer constructor throws, callback.close() is never invoked. Confirm
// whether the constructor can fail before relying on the finally block for cleanup.
KafkaConsumer kafkaConsumer = new KafkaConsumer(seedBrokers, topic, partition, kafkaTailFetchTimeoutMs);
try {
// Combine the context-derived filter with the caller-supplied filter; both must match.
Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext),
filter));
// Current bounds of the partition: [earliestOffset, latestOffset).
long latestOffset = kafkaConsumer.fetchOffset(KafkaConsumer.Offset.LATEST);
long earliestOffset = kafkaConsumer.fetchOffset(KafkaConsumer.Offset.EARLIEST);
long stopOffset;
long startOffset;
// Negative fromOffset means "start from the newest events".
if (fromOffset < 0) {
stopOffset = latestOffset;
} else {
stopOffset = fromOffset;
}
// Read the window of maxEvents immediately PRECEDING stopOffset, clamped to the
// earliest offset still retained by Kafka.
startOffset = stopOffset - maxEvents;
if (startOffset < earliestOffset) {
startOffset = earliestOffset;
}
if (startOffset >= stopOffset || startOffset >= latestOffset) {
// At end of kafka events, nothing to return
return;
}
// Events between startOffset and stopOffset may not have the required logs we are looking for,
// we'll need to return at least 1 log offset for next getLogPrev call to work.
int fetchCount = 0;
// Keep sliding the window backward one maxEvents-sized step at a time until at least
// one event passes the filter, or we hit the earliest retained offset.
while (fetchCount == 0) {
fetchCount = fetchLogEvents(kafkaConsumer, logFilter, startOffset, stopOffset, maxEvents, callback);
stopOffset = startOffset;
if (stopOffset <= earliestOffset) {
// Truly no log messages found.
break;
}
startOffset = stopOffset - maxEvents;
if (startOffset < earliestOffset) {
startOffset = earliestOffset;
}
}
// NOTE(review): catch (Throwable) is deliberately broad here (boundary of a Runnable);
// Throwables.propagate rethrows after logging. Throwables.propagate is deprecated in
// recent Guava — consider throwIfUnchecked + new RuntimeException(e) when upgrading.
} catch (Throwable e) {
LOG.error("Got exception: ", e);
throw Throwables.propagate(e);
} finally {
// Close the callback first, then the consumer; the nested try guarantees the consumer
// is closed even if callback.close() throws.
try {
try {
callback.close();
} finally {
kafkaConsumer.close();
}
} catch (IOException e) {
LOG.error(String.format("Caught exception when closing KafkaConsumer for topic %s, partition %d",
topic, partition), e);
}