long nextTxId = recoveryFile.readLong();
long expectedChecksum = recoveryFile.readLong();
int pageCounter = recoveryFile.readInt();
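// The fixed-size header holds the next tx id, an Adler32 checksum of the page data, and the page count.
// Seek past it (assumption: the header may be padded out to RECOVERY_FILE_HEADER_SIZE) to the first page record.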
recoveryFile.seek(RECOVERY_FILE_HEADER_SIZE);
Checksum checksum = new Adler32();
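// Recompute the checksum over the page data as it is read, so it can be compared against the expected value from the header.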
LinkedHashMap<Long, byte[]> batch = new LinkedHashMap<Long, byte[]>();
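// LinkedHashMap preserves insertion order, so the pages are replayed in the same order they were logged.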
try {
for (int i = 0; i < pageCounter; i++) {
long offset = recoveryFile.readLong();
byte[] data = new byte[pageSize];
if (recoveryFile.read(data, 0, pageSize) != pageSize) {
// Invalid recovery record: could not fully read the page data, probably due to a partial write to the recovery buffer.
return nextTxId;
}
checksum.update(data, 0, pageSize);
batch.put(offset, data);
}
} catch (Exception e) {
// If an error occurred, it was because the redo buffer was not fully written out,
// so don't replay it; the pages on disk should still be consistent.
LOG.debug("Redo buffer was not fully intact: ", e);
return nextTxId;
}
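// Track how many pages this recovery pass will replay (assumption: consulted elsewhere, e.g. for reporting or when the recovery area is reused).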
recoveryPageCount = pageCounter;
// If the checksum is not valid, the recovery buffer was only partially written to disk.
if (checksum.getValue() != expectedChecksum) {
return nextTxId;
}
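// At this point the batch is known to be complete and intact. Re-applying full-page
// writes at fixed offsets is idempotent, so recovery is safe to repeat if interrupted.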
// Re-apply all the writes in the recovery buffer.
for (Map.Entry<Long, byte[]> e : batch.entrySet()) {