* be the same as the memory overhead.
*/
int adjustMem = (2 * readBufferSize) +
obsoleteOffsets.getLogSize() +
lookAheadCacheSize;
MemoryBudget budget = env.getMemoryBudget();
budget.updateAdminMemoryUsage(adjustMem);
/* Evict after updating the budget. */
if (Cleaner.DO_CRITICAL_EVICTION) {
env.criticalEviction(true /*backgroundIO*/);
}
/*
* We keep a look-ahead cache of non-obsolete LNs. When we look up a
* BIN in processLN, we also process any other LNs in that BIN that are
* in the cache. This can reduce the number of tree lookups.
*/
LookAheadCache lookAheadCache = new LookAheadCache(lookAheadCacheSize);
/*
* For obsolete entries we must check for pending deleted DBs. To
* avoid the overhead of DbTree.getDb on every entry, we collect the
* DB IDs of obsolete entries and do the check once per DB at the end.
*/
Set<DatabaseId> checkPendingDbSet = new HashSet<DatabaseId>();
/*
* Use local caching to reduce DbTree.getDb overhead. Do not call
* releaseDb after getDb with the dbCache, since the entire dbCache
* will be released at the end of this method.
*/
Map<DatabaseId, DatabaseImpl> dbCache =
new HashMap<DatabaseId, DatabaseImpl>();
DbTree dbMapTree = env.getDbTree();
/* Keep track of all database IDs encountered. */
Set<DatabaseId> databases = new HashSet<DatabaseId>();
/* Keep track of last VLSN encountered. */
VLSN lastVLSN = VLSN.NULL_VLSN;
try {
/* Create the file reader. */
CleanerFileReader reader = new CleanerFileReader
(env, readBufferSize, DbLsn.makeLsn(fileNum, 0), fileNum);
/* Validate all entries before ever deleting a file. */
reader.setAlwaysValidateChecksum(true);
TreeLocation location = new TreeLocation();
int nProcessedLNs = 0;
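/* Read each entry in the file: count it as obsolete or process it as a live LN, IN, or root. */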
while (reader.readNextEntryAllowExceptions()) {
cleaner.nEntriesRead.increment();
long logLsn = reader.getLastLsn();
long fileOffset = DbLsn.getFileOffset(logLsn);
boolean isLN = reader.isLN();
boolean isIN = reader.isIN();
boolean isRoot = reader.isRoot();
boolean isObsolete = false;
/* Maintain a set of all databases encountered. */
DatabaseId dbId = reader.getDatabaseId();
if (dbId != null) {
databases.add(dbId);
}
/* Maintain last VLSN encountered. */
VLSN vlsn = reader.getVLSN();
if (vlsn != null) {
assert (vlsn.compareTo(lastVLSN) > 0) :
"VLSNs out of order, last=" + lastVLSN + " current=" + vlsn;
lastVLSN = vlsn;
}
/* Remember the version of the log file. */
if (reader.isFileHeader()) {
fileLogVersion = reader.getFileHeader().getLogVersion();
}
/* Stop if the daemon is shut down. */
if (env.isClosing()) {
return false;
}
/* Update background reads. */
int nReads = reader.getAndResetNReads();
if (nReads > 0) {
env.updateBackgroundReads(nReads);
}
/* Sleep if background read/write limit was exceeded. */
env.sleepAfterBackgroundIO();
/* Check for a known obsolete node. */
while (nextObsolete < fileOffset && obsoleteIter.hasNext()) {
nextObsolete = obsoleteIter.next();
}
if (nextObsolete == fileOffset) {
isObsolete = true;
}
/* Check for the entry type next because it is very cheap. */
if (!isObsolete &&
!isLN &&
!isIN &&
!isRoot) {
/* Consider all entries we do not process as obsolete. */
isObsolete = true;
}
/*
* SR 14583: In JE 2.0 and later we can assume that all
* deleted LNs are obsolete. Either the delete committed and
* the BIN parent is marked with a pending deleted bit, or the
* delete rolled back, in which case there is no reference
* to this entry. JE 1.7.1 and earlier require a tree lookup
* because deleted LNs may still be reachable through their BIN
* parents.
*/
if (!isObsolete &&
isLN &&
reader.getLN().isDeleted() &&
fileLogVersion > 2) {
/* Deleted LNs are always obsolete. */
isObsolete = true;
}
/* Check the current tracker last, as it is more expensive. */
if (!isObsolete &&
tfs != null &&
tfs.containsObsoleteOffset(fileOffset)) {
isObsolete = true;
}
/* Skip known obsolete nodes. */
if (isObsolete) {
/* Count obsolete stats. */
if (isLN) {
nLNsObsoleteThisRun++;
} else if (isIN) {
nINsObsoleteThisRun++;
}
/* Must update the pending DB set for obsolete entries. */
if (dbId != null) {
checkPendingDbSet.add(dbId);
}
continue;
}
/* Evict before processing each entry. */
if (Cleaner.DO_CRITICAL_EVICTION) {
env.criticalEviction(true /*backgroundIO*/);
}
/* The entry is not known to be obsolete -- process it now. */
if (isLN) {
LN targetLN = reader.getLN();
byte[] key = reader.getKey();
byte[] dupKey = reader.getDupTreeKey();
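/* Queue the LN by file offset so processLN can handle LNs in the same BIN together. */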
lookAheadCache.add
(Long.valueOf(DbLsn.getFileOffset(logLsn)),
new LNInfo(targetLN, dbId, key, dupKey));
if (lookAheadCache.isFull()) {
processLN(fileNum, location, lookAheadCache, dbCache);
}
/*
* Process pending LNs before proceeding in order to
* prevent the pending list from growing too large.
*/
nProcessedLNs += 1;
if (nProcessedLNs % PROCESS_PENDING_EVERY_N_LNS == 0) {
cleaner.processPending();
}
} else if (isIN) {
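/* INs are not queued; look up the owning DB and process the IN now. */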
IN targetIN = reader.getIN();
DatabaseImpl db = dbMapTree.getDb
(dbId, cleaner.lockTimeout, dbCache);
targetIN.setDatabase(db);
processIN(targetIN, db, logLsn);
} else if (isRoot) {
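/* A mapping tree root entry; ask the environment to rewrite the root at this LSN. */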
env.rewriteMapTreeRoot(logLsn);
} else {
assert false;
}
}
/* Process remaining queued LNs. */
while (!lookAheadCache.isEmpty()) {
if (Cleaner.DO_CRITICAL_EVICTION) {
env.criticalEviction(true /*backgroundIO*/);
}
processLN(fileNum, location, lookAheadCache, dbCache);
/* Sleep if background read/write limit was exceeded. */
env.sleepAfterBackgroundIO();
}
/* Update the pending DB set. */
for (DatabaseId pendingDbId : checkPendingDbSet) {
DatabaseImpl db = dbMapTree.getDb
(pendingDbId, cleaner.lockTimeout, dbCache);
cleaner.addPendingDB(db);
}
/* Update reader stats. */
nEntriesReadThisRun = reader.getNumRead();
nRepeatIteratorReadsThisRun = reader.getNRepeatIteratorReads();
} catch (ChecksumException e) {
throw new EnvironmentFailureException
(env, EnvironmentFailureReason.LOG_CHECKSUM, e);
} finally {
/* Subtract the overhead of this method from the budget. */
budget.updateAdminMemoryUsage(0 - adjustMem);
/* Release all cached DBs. */
dbMapTree.releaseDbs(dbCache);
/* Allow flushing of TFS when cleaning is complete. */