* @param logs the log files to replay
* @throws Exception
*/
void replayLog(List<File> logs) throws Exception {
int count = 0;
MultiMap transactionMap = new MultiValueMap();
// seed both with the highest known sequence of either the txnid or woid
long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
LOG.info("Starting replay of " + logs);
// Load the inflight puts into the transaction map to see if they were
// committed in one of the logs.
SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
for (Long txnID : inflightPuts.keySet()) {
Set<Long> eventPointers = inflightPuts.get(txnID);
for (Long eventPointer : eventPointers) {
transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
}
}
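// Inflight takes are loaded as well; they are consulted when a take commit is
// replayed below, so takes that were inflight at checkpoint time but committed
// in a later log are accounted for.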
SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
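// First pass: open a sequential reader for every log, skip data already
// covered by the last checkpoint, and prime the record buffer with the first
// record of each file.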
try {
for (File log : logs) {
LOG.info("Replaying " + log);
try {
LogFile.SequentialReader reader =
LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
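// Skip ahead to the position recorded for the last checkpoint, if the file has
// one; records are additionally filtered by write-order ID during replay below.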
reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
"Readers " + readers + " already contains "
+ reader.getLogFileID());
readers.put(reader.getLogFileID(), reader);
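// Prime the buffer with this file's first record; a file with nothing left to
// replay is closed and dropped from the reader map immediately.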
LogRecord logRecord = reader.next();
if (logRecord == null) {
readers.remove(reader.getLogFileID());
reader.close();
} else {
logRecordBuffer.add(logRecord);
}
} catch (EOFException e) {
LOG.warn("Ignoring " + log + " due to EOF", e);
}
}
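// Second pass: consume records via next(), which drains logRecordBuffer
// (presumably refilling it from the open readers), and rebuild the transaction
// state written after the last checkpoint.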
LogRecord entry = null;
FlumeEventPointer ptr = null;
while ((entry = next()) != null) {
// for puts, fileId and offset locate the put record itself
// for takes, the take's embedded fileID and offset point to the put being taken
int fileId = entry.getFileID();
int offset = entry.getOffset();
TransactionEventRecord record = entry.getEvent();
short type = record.getRecordType();
long trans = record.getTransactionID();
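// Track the highest transaction ID and write-order ID seen; these seeds are
// presumably used after replay so newly issued IDs stay ahead of replayed ones.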
transactionIDSeed = Math.max(transactionIDSeed, trans);
writeOrderIDSeed = Math.max(writeOrderIDSeed,
record.getLogWriteOrderID());
readCount++;
if (readCount % 10000 == 0 && readCount > 0) {
LOG.info("Read " + readCount + " records");
}
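// Only records written after the last checkpoint can change the queue state.
// Puts and takes are staged in transactionMap under their transaction ID;
// a rollback discards the staged pointers, while a commit removes them from
// the map for processing.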
if (record.getLogWriteOrderID() > lastCheckpoint) {
if (type == TransactionEventRecord.Type.PUT.get()) {
putCount++;
ptr = new FlumeEventPointer(fileId, offset);
transactionMap.put(trans, ptr);
} else if (type == TransactionEventRecord.Type.TAKE.get()) {
takeCount++;
Take take = (Take) record;
ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
transactionMap.put(trans, ptr);
} else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
rollbackCount++;
transactionMap.remove(trans);
} else if (type == TransactionEventRecord.Type.COMMIT.get()) {
commitCount++;
@SuppressWarnings("unchecked")
Collection<FlumeEventPointer> pointers =
(Collection<FlumeEventPointer>) transactionMap.remove(trans);
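// The commit record carries the type of the transaction it closes; for take
// commits, the inflight takes loaded above are consulted as well.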
if (((Commit) record).getType()
== TransactionEventRecord.Type.TAKE.get()) {
if (inflightTakes.containsKey(trans)) {
if (pointers == null) {
pointers = Sets.newHashSet();