/*
 * NOTE(review): this is the tail of a larger method; declarations of
 * fileNum, fileLogVersion, nextObsolete, obsoleteIter, lookAheadCache,
 * dbCache, adjustMem, calcUtilizationOnly, checkPendingDbSet, etc.
 * appear before this excerpt -- confirm semantics against the full file.
 *
 * Reads every entry in log file fileNum, classifies each entry as
 * obsolete or live, accumulates per-run statistics, and (unless only
 * calculating utilization) processes live LNs, INs, BIN-deltas and the
 * DbTree root so the file can eventually be deleted.
 */
/* Keep track of all database IDs encountered. */
final Set<DatabaseId> databases = new HashSet<DatabaseId>();
/* Create the file reader, positioned at offset 0 of this file. */
final CleanerFileReader reader = new CleanerFileReader
(env, readBufferSize, DbLsn.makeLsn(fileNum, 0), fileNum,
fileSummary);
/* Validate all entries before ever deleting a file. */
reader.setAlwaysValidateChecksum(true);
try {
final TreeLocation location = new TreeLocation();
/* Counts LNs handled so far, used to amortize processPending calls. */
int nProcessedLNs = 0;
while (reader.readNextEntryAllowExceptions()) {
cleaner.nEntriesRead.increment();
long logLsn = reader.getLastLsn();
long fileOffset = DbLsn.getFileOffset(logLsn);
/* Entry-type flags; at most one is expected to be true per entry. */
boolean isLN = reader.isLN();
boolean isIN = reader.isIN();
boolean isBINDelta = reader.isBINDelta();
boolean isDbTree = reader.isDbTree();
boolean isObsolete = false;
/* Maintain a set of all databases encountered. */
final DatabaseId dbId = reader.getDatabaseId();
DatabaseImpl db = null;
if (dbId != null) {
databases.add(dbId);
/* Look up (and cache) the DatabaseImpl for this entry's DB. */
db = dbMapTree.getDb(dbId, cleaner.lockTimeout, dbCache);
/*
 * If the DB is gone, this entry is obsolete. If delete
 * cleanup is in progress, we will put the DB into the DB
 * pending set further below. This entry will be declared
 * deleted after the delete cleanup is finished.
 */
if (db == null || db.isDeleted()) {
isObsolete = true;
}
}
/*
 * Remember the version of the log file; the file header is the
 * first entry, so fileLogVersion is set before any entry that
 * depends on it (see the deleted-LN check below).
 */
if (reader.isFileHeader()) {
fileLogVersion = reader.getFileHeader().getLogVersion();
}
/*
 * Stop if the daemon is shut down.  Returning false signals
 * that the file was NOT fully processed.
 */
if (env.isClosing()) {
return false;
}
/* Update background reads. */
int nReads = reader.getAndResetNReads();
if (nReads > 0) {
env.updateBackgroundReads(nReads);
}
/* Sleep if background read/write limit was exceeded. */
env.sleepAfterBackgroundIO();
/*
 * Check for a known obsolete node.  obsoleteIter yields the
 * tracked obsolete offsets for this file; advancing while
 * nextObsolete < fileOffset assumes the offsets are produced
 * in ascending order.  NOTE(review): confirm obsoleteIter is
 * sorted ascending -- otherwise offsets could be skipped.
 */
while (nextObsolete < fileOffset && obsoleteIter.hasNext()) {
nextObsolete = obsoleteIter.next();
}
if (nextObsolete == fileOffset) {
isObsolete = true;
}
/* Check for the entry type next because it is very cheap. */
if (!isObsolete &&
!isLN &&
!isIN &&
!isBINDelta &&
!isDbTree) {
/* Consider all entries we do not process as obsolete. */
isObsolete = true;
}
/*
 * SR 14583: In JE 2.0 and later we can assume that all
 * deleted LNs are obsolete. Either the delete committed and
 * the BIN parent is marked with a pending deleted bit, or the
 * delete rolled back, in which case there is no reference
 * to this entry. JE 1.7.1 and earlier require a tree lookup
 * because deleted LNs may still be reachable through their BIN
 * parents.  (Log version > 2 corresponds to JE 2.0+.)
 */
if (!isObsolete &&
isLN &&
reader.isLNDeleted() &&
fileLogVersion > 2) {
/* Deleted LNs are always obsolete. */
isObsolete = true;
}
/* Skip known obsolete nodes. */
if (isObsolete) {
/* Count obsolete stats, but only when actually cleaning. */
if (!calcUtilizationOnly) {
if (isLN) {
nLNsObsoleteThisRun++;
} else if (isBINDelta) {
nBINDeltasObsoleteThisRun++;
} else if (isIN) {
nINsObsoleteThisRun++;
}
}
/*
 * Update the pending DB set for obsolete entries, so DBs
 * being deleted are re-checked after cleanup (see the
 * "DB is gone" comment above).
 */
if (checkPendingDbSet != null && dbId != null) {
checkPendingDbSet.add(dbId);
}
/* Count utilization for obsolete entry. */
reader.countObsolete();
/* Nothing further to do for an obsolete entry. */
continue;
}
/*
 * If we are only calculating utilization, do not process
 * non-obsolete entries.
 */
if (calcUtilizationOnly) {
continue;
}
/* Evict before processing each entry. */
if (Cleaner.DO_CRITICAL_EVICTION) {
env.daemonEviction(true /*backgroundIO*/);
}
/* The entry is not known to be obsolete -- process it now. */
if (isLN) {
/*
 * Buffer the LN in the look-ahead cache (keyed by file
 * offset) and process a batch when the cache fills.
 * NOTE(review): lookAheadCache is assumed non-null here;
 * presumably it is always created when calcUtilizationOnly
 * is false -- verify in the method head.
 */
final LNLogEntry lnEntry = reader.getLNLogEntry();
lnEntry.postFetchInit(db);
final LN targetLN = lnEntry.getLN();
final byte[] key = lnEntry.getKey();
lookAheadCache.add
(Long.valueOf(DbLsn.getFileOffset(logLsn)),
new LNInfo(targetLN, dbId, key));
if (lookAheadCache.isFull()) {
processLN(fileNum, location, lookAheadCache, dbCache);
}
/*
 * Process pending LNs before proceeding in order to
 * prevent the pending list from growing too large.
 */
nProcessedLNs += 1;
if (nProcessedLNs % PROCESS_PENDING_EVERY_N_LNS == 0) {
cleaner.processPending();
}
} else if (isIN) {
final IN targetIN = reader.getIN(db);
targetIN.setDatabase(db);
processIN(targetIN, db, logLsn);
} else if (isBINDelta) {
final BINDelta delta = reader.getBINDelta();
processBINDelta(delta, db, logLsn);
} else if (isDbTree) {
/* Root of the mapping tree: rewrite it rather than migrate. */
env.rewriteMapTreeRoot(logLsn);
} else {
/* Unreachable: the type checks above cover all live entries. */
assert false;
}
}
/* Process remaining queued LNs. */
if (lookAheadCache != null) {
while (!lookAheadCache.isEmpty()) {
if (Cleaner.DO_CRITICAL_EVICTION) {
env.daemonEviction(true /*backgroundIO*/);
}
processLN(fileNum, location, lookAheadCache, dbCache);
/* Sleep if background read/write limit was exceeded. */
env.sleepAfterBackgroundIO();
}
}
/*
 * Update the pending DB set: re-resolve each DB seen in an
 * obsolete entry and hand it to the cleaner, which tracks DBs
 * whose delete cleanup must finish before files are reclaimed.
 */
if (checkPendingDbSet != null) {
for (Iterator<DatabaseId> i = checkPendingDbSet.iterator();
i.hasNext();) {
final DatabaseId pendingDbId = i.next();
final DatabaseImpl db = dbMapTree.getDb
(pendingDbId, cleaner.lockTimeout, dbCache);
cleaner.addPendingDB(db);
}
}
/* Update reader stats. */
nEntriesReadThisRun = reader.getNumRead();
nRepeatIteratorReadsThisRun = reader.getNRepeatIteratorReads();
} catch (ChecksumException e) {
/* A corrupt entry invalidates the environment; never delete. */
throw new EnvironmentFailureException
(env, EnvironmentFailureReason.LOG_CHECKSUM, e);
} finally {
/* Subtract the overhead of this method from the budget. */
budget.updateAdminMemoryUsage(0 - adjustMem);
/* Release all cached DBs acquired via dbMapTree.getDb above. */
dbMapTree.releaseDbs(dbCache);
}
/*
 * File is fully processed, update status information.  Marking the
 * file cleaned makes it eligible for deletion after a checkpoint.
 */
if (!calcUtilizationOnly) {
fileSelector.addCleanedFile(fileNum, databases,
reader.getLastVLSN(), budget);
}
/* true == the whole file was read and processed. */
return true;
}