/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 2002-2005
* Sleepycat Software. All rights reserved.
*
* $Id: UtilizationProfile.java,v 1.34 2005/09/21 17:49:45 linda Exp $
*/
package com.sleepycat.je.cleaner;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.logging.Level;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.DbInternal;
import com.sleepycat.je.OperationStatus;
import com.sleepycat.je.TransactionConfig;
import com.sleepycat.je.config.EnvironmentParams;
import com.sleepycat.je.dbi.CursorImpl;
import com.sleepycat.je.dbi.DatabaseId;
import com.sleepycat.je.dbi.DatabaseImpl;
import com.sleepycat.je.dbi.DbTree;
import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.dbi.MemoryBudget;
import com.sleepycat.je.dbi.CursorImpl.SearchMode;
import com.sleepycat.je.log.FileManager;
import com.sleepycat.je.log.entry.LNLogEntry;
import com.sleepycat.je.tree.BIN;
import com.sleepycat.je.tree.FileSummaryLN;
import com.sleepycat.je.tree.Tree;
import com.sleepycat.je.tree.TreeLocation;
import com.sleepycat.je.txn.AutoTxn;
import com.sleepycat.je.txn.BasicLocker;
import com.sleepycat.je.txn.LockType;
import com.sleepycat.je.txn.Locker;
import com.sleepycat.je.utilint.DbLsn;
/**
* The UP tracks utilization summary information for all log files.
*
* <p>Unlike the UtilizationTracker, the UP is not accessed under the log write
* latch and is instead synchronized on itself. It is accessed by four other
* entities: the cleaner, the checkpointer, the compressor, and the recovery
* manager. It is not accessed during the primary data access path, except
* for when committing the Database truncate and remove operations.</p>
*
* <p>The cleaner will ask the UP to populate its cache in order to determine
* the total log size or to select the best file for cleaning. The UP will
* then read all records in the UP database that are not already cached. The
* checkpointer calls putFileSummary to write file summary LNs to the log.</p>
*
* <p>Because this object is synchronized it is possible that the cleaner will
* hold up the checkpointer if the cleaner is populating its cache or
* calculating the best file, and the checkpointer tries to write the file
* summary LNs to the log. This blocking is acceptable. Deadlocks will not
* occur since calls are always from the checkpointer or cleaner to the UP, and
* not in the other direction.</p>
*/
public class UtilizationProfile {
/*
* Note that age is a distance between files not a number of files, that
* is, deleted files are counted in the age.
*/
private EnvironmentImpl env;
private UtilizationTracker tracker;
private DatabaseImpl fileSummaryDb; // stored fileNum -> FileSummary
private SortedMap fileSummaryMap; // cached fileNum -> FileSummary
private boolean cachePopulated;
private boolean rmwFixEnabled;
/* Minimum utilization threshold that triggers cleaning. */
private int minUtilization;
/*
 * Minimum age to qualify for cleaning. If the first active LSN file is 5
 * and the minimum age is 2, file 4 won't qualify but file 3 will. Must be
 * greater than zero because we never clean the first active LSN file.
 */
private int minAge;
/**
* Creates an empty UP.
*/
public UtilizationProfile(EnvironmentImpl env,
UtilizationTracker tracker)
throws DatabaseException {
this.env = env;
this.tracker = tracker;
/* Cache starts empty; populateCache() fills it lazily on first use. */
fileSummaryMap = new TreeMap();
/* Read the cleaner tuning parameters once at construction time. */
minAge = env.getConfigManager().getInt
(EnvironmentParams.CLEANER_MIN_AGE);
minUtilization = env.getConfigManager().getInt
(EnvironmentParams.CLEANER_MIN_UTILIZATION);
rmwFixEnabled = env.getConfigManager().getBoolean
(EnvironmentParams.CLEANER_RMW_FIX);
}
/**
* @see EnvironmentParams#CLEANER_RMW_FIX
* @see FileSummaryLN#postFetchInit
*/
public boolean isRMWFixEnabled() {
/* Read once from the config at construction; never changes afterward. */
return rmwFixEnabled;
}
/**
* Returns the number of files in the profile.
*/
synchronized int getNumberOfFiles()
throws DatabaseException {
/*
 * populateCache returns false only when the summary db does not exist
 * in a read-only environment; callers of this method expect it to
 * exist, hence the assertion.  populateCache must be called outside
 * the assert so it still runs when assertions are disabled.
 */
boolean opened = populateCache();
assert opened;
return fileSummaryMap.size();
}
/**
* Returns the cheapest file to clean from the given list of files. This
* method is used to select the first file to be cleaned in the batch of
* to-be-cleaned files.
*/
/**
 * Returns the cheapest file to clean from the given list of files,
 * where cost is measured as the amount of non-obsolete data that would
 * have to be migrated.  Used to pick the first file in a batch of
 * to-be-cleaned files.
 */
synchronized Long getCheapestFileToClean(List files)
    throws DatabaseException {

    /* With a single candidate there is nothing to compare. */
    if (files.size() == 1) {
        return (Long) files.get(0);
    }

    boolean opened = populateCache();
    assert opened;

    Long cheapest = null;
    int lowestCost = Integer.MAX_VALUE;

    for (Iterator iter = files.iterator(); iter.hasNext();) {
        Long candidate = (Long) iter.next();
        long fileNumVal = candidate.longValue();

        /*
         * Compute this file's cleaning cost, folding in changes still
         * held by the tracker that have not yet been flushed.
         */
        FileSummary stored = (FileSummary) fileSummaryMap.get(candidate);
        FileSummary combined = addTrackedSummary(stored, fileNumVal);
        int cost = combined.getNonObsoleteCount();

        /* Keep the candidate with the smallest cost seen so far. */
        if (cheapest == null || cost < lowestCost) {
            cheapest = candidate;
            lowestCost = cost;
        }
    }
    return cheapest;
}
/**
* Returns the best file that qualifies for cleaning, or null if no file
* qualifies.
*
* @param fileSelector is used to determine valid cleaning candidates.
*
* @param forceCleaning is true to always select a file, even if its
* utilization is above the minimum utilization threshold.
*
* @param lowUtilizationFiles is a returned set of files that are below the
* minimum utilization threshold.
*/
synchronized Long getBestFileForCleaning(FileSelector fileSelector,
boolean forceCleaning,
Set lowUtilizationFiles)
throws DatabaseException {
/* Start with an empty set.*/
if (lowUtilizationFiles != null) {
lowUtilizationFiles.clear();
}
/* Populate the cache. */
boolean opened = populateCache();
assert opened;
/* Paranoia. There should always be 1 file. */
if (fileSummaryMap.size() == 0) {
return null;
}
/* There must have been at least one checkpoint previously. */
long firstActiveLsn = env.getCheckpointer().getFirstActiveLsn();
if (firstActiveLsn == DbLsn.NULL_LSN) {
return null;
}
/* Calculate totals and find the best file. */
Iterator iter = fileSummaryMap.keySet().iterator();
Long bestFile = null;
/*
 * 101 is above any real utilization (0-100), so the first candidate
 * examined always becomes the initial best file.
 */
int bestUtilization = 101;
long totalSize = 0;
long totalObsoleteSize = 0;
while (iter.hasNext()) {
Long file = (Long) iter.next();
long fileNum = file.longValue();
/*
 * Calculate this file's utilization, including changes held by the
 * tracker that have not yet been flushed to the utilization db.
 */
FileSummary summary = (FileSummary) fileSummaryMap.get(file);
summary = addTrackedSummary(summary, fileNum);
int obsoleteSize = summary.getObsoleteSize();
/*
* If the file is already being cleaned, only total the
* non-obsolete amount. This is an optimistic prediction of the
* results of cleaning, and is used to prevent over-cleaning.
*/
if (fileSelector.isFileCleaningInProgress(file)) {
totalSize += summary.totalSize - obsoleteSize;
continue;
}
/* Add this file's value to the totals. */
totalObsoleteSize += obsoleteSize;
totalSize += summary.totalSize;
/*
 * If the file is too young to be cleaned, skip it as a candidate.
 * Note that it was still counted in the totals above.
 */
if (DbLsn.getFileNumber(firstActiveLsn) - fileNum < minAge) {
continue;
}
/* Select this file if it has the lowest utilization so far. */
int thisUtilization = utilization(obsoleteSize, summary.totalSize);
if (bestFile == null || thisUtilization < bestUtilization) {
bestFile = file;
bestUtilization = thisUtilization;
}
/* Return all low utilization files. */
if (lowUtilizationFiles != null &&
thisUtilization < minUtilization) {
lowUtilizationFiles.add(file);
}
}
/*
* Return the best file if we are under the minimum utilization or
* we're cleaning aggressively.
*/
int totalUtilization = utilization(totalObsoleteSize, totalSize);
if (forceCleaning || totalUtilization < minUtilization) {
return bestFile;
} else {
return null;
}
}
/**
* Calculate the utilization percentage.
*/
/**
 * Calculates the utilization percentage: the portion of totalSize that
 * is not obsolete, as an integer percentage truncated toward zero.
 * An empty file (totalSize == 0) is defined to have zero utilization.
 */
public static int utilization(long obsoleteSize, long totalSize) {
    if (totalSize == 0) {
        return 0;
    }
    long usedSize = totalSize - obsoleteSize;
    return (int) ((usedSize * 100) / totalSize);
}
/**
* Add the tracked summary, if one exists, to the base summary.
*/
/**
 * Returns the given base summary combined with the tracker's uncounted
 * info for the file, if any.  A fresh FileSummary is built so that
 * neither input object is modified; when nothing is tracked for the
 * file, the base summary is returned unchanged.
 */
private FileSummary addTrackedSummary(FileSummary summary, long fileNum) {
    TrackedFileSummary delta = tracker.getTrackedFile(fileNum);
    if (delta == null) {
        return summary;
    }
    FileSummary combined = new FileSummary();
    combined.add(summary);
    combined.add(delta);
    return combined;
}
/**
* Count the given tracked info as obsolete and then log the summaries.
*/
/**
 * Counts the given tracked info as obsolete and then logs the
 * summaries, unless utilization flushing is disabled (for unit tests).
 */
public void countAndLogSummaries(TrackedFileSummary[] summaries)
    throws DatabaseException {

    /* Count tracked info under the log write latch. */
    env.getLogManager().countObsoleteNodes(summaries);

    /* Utilization flushing may be disabled for unittests. */
    boolean flushAllowed = DbInternal.getCheckpointUP
        (env.getConfigManager().getEnvironmentConfig());
    if (!flushAllowed) {
        return;
    }

    /* Write out the modified file summaries. */
    for (int i = 0; i < summaries.length; i += 1) {
        TrackedFileSummary tfs =
            tracker.getTrackedFile(summaries[i].getFileNumber());
        if (tfs != null) {
            flushFileSummary(tfs);
        }
    }
}
/**
* Returns a copy of the current file summary map, optionally including
* tracked summary information, for use by the DbSpace utility and by unit
* tests. The returned map's key is a Long file number and its value is a
* FileSummary.
*/
/**
 * Returns a copy of the current file summary map, optionally merging in
 * tracked (not-yet-flushed) summary information.  Used by the DbSpace
 * utility and by unit tests.  Keys are Long file numbers; values are
 * FileSummary objects.
 */
public synchronized SortedMap getFileSummaryMap(boolean includeTrackedFiles)
    throws DatabaseException {

    boolean opened = populateCache();
    assert opened;

    if (!includeTrackedFiles) {
        /* A plain snapshot of the cached map is sufficient. */
        return new TreeMap(fileSummaryMap);
    }

    /* Merge the tracker's uncounted info into each cached summary. */
    TreeMap result = new TreeMap();
    Iterator keys = fileSummaryMap.keySet().iterator();
    while (keys.hasNext()) {
        Long file = (Long) keys.next();
        FileSummary stored = (FileSummary) fileSummaryMap.get(file);
        result.put(file, addTrackedSummary(stored, file.longValue()));
    }

    /* Append tracked files that have no stored summary record yet. */
    TrackedFileSummary[] trackedFiles = tracker.getTrackedFiles();
    for (int i = 0; i < trackedFiles.length; i += 1) {
        Long file = new Long(trackedFiles[i].getFileNumber());
        if (!result.containsKey(file)) {
            result.put(file, trackedFiles[i]);
        }
    }
    return result;
}
/**
* Clears the cache of file summary info. The cache starts out unpopulated
* and is populated on the first call to getBestFileForCleaning.
*/
/**
 * Clears the cache of file summary info.  The cache starts out
 * unpopulated and is refilled lazily by populateCache on next use.
 */
public synchronized void clearCache() {

    /* Give back the budgeted memory for the discarded entries. */
    int released = fileSummaryMap.size() *
        MemoryBudget.UTILIZATION_PROFILE_ENTRY;
    MemoryBudget mb = env.getMemoryBudget();
    mb.updateMiscMemoryUsage(-released);

    fileSummaryMap = new TreeMap();
    cachePopulated = false;
}
/**
* Removes a file from the utilization database and the profile, after it
* has been deleted by the cleaner.
*/
/**
 * Removes a file from the utilization database and the cached profile,
 * after the log file has been deleted by the cleaner.
 */
synchronized void removeFile(Long fileNum)
    throws DatabaseException {

    boolean opened = populateCache();
    assert opened;

    /* Remove from the cache, returning its budgeted memory. */
    Object removed = fileSummaryMap.remove(fileNum);
    if (removed != null) {
        env.getMemoryBudget().updateMiscMemoryUsage
            (0 - MemoryBudget.UTILIZATION_PROFILE_ENTRY);
    }

    /* Delete from the summary db. */
    deleteFileSummary(fileNum);
}
/**
 * Deletes all FileSummaryLN records for the given file number.
 */
private void deleteFileSummary(Long fileNum)
throws DatabaseException {
Locker locker = null;
CursorImpl cursor = null;
try {
locker = new BasicLocker(env);
cursor = new CursorImpl(fileSummaryDb, locker);
/* Prevent recursion when synchronized on this object. */
cursor.setAllowEviction(false);
DatabaseEntry keyEntry = new DatabaseEntry();
DatabaseEntry dataEntry = new DatabaseEntry();
long fileNumVal = fileNum.longValue();
/* Search by file number; nothing to delete if no LN exists. */
if (!getFirstFSLN
(cursor, fileNumVal, keyEntry, dataEntry, LockType.WRITE)) {
return;
}
/* Delete all LNs for this file number. */
OperationStatus status = OperationStatus.SUCCESS;
while (status == OperationStatus.SUCCESS) {
/*
 * NOTE(review): LockType.NONE here presumably relies on the
 * WRITE lock taken by the search/getNext above -- confirm.
 */
FileSummaryLN ln = (FileSummaryLN)
cursor.getCurrentLN(LockType.NONE);
if (ln != null) {
/* Stop if the file number changes. */
if (fileNumVal != ln.getFileNumber(keyEntry.getData())) {
break;
}
TrackedFileSummary tfs =
tracker.getTrackedFile(fileNumVal);
/* Associate the tracked summary so it will be cleared. */
if (tfs != null) {
ln.setTrackedSummary(tfs);
}
/*
* Do not evict after deleting since the compressor would
* have to fetch it again.
*/
cursor.delete();
}
status = cursor.getNext
(keyEntry, dataEntry, LockType.WRITE,
true, // forward
false); // alreadyLatched
}
} finally {
if (cursor != null) {
cursor.releaseBINs();
cursor.close();
}
if (locker != null) {
locker.operationEnd();
}
}
}
/**
* Updates and stores the FileSummary for a given tracked file, if flushing
* of the summary is allowed.
*/
/**
 * Updates and stores the FileSummary for a given tracked file, if
 * flushing of the summary is currently allowed (flushing is disallowed
 * while the file is being cleaned; see getObsoleteDetail).
 */
public void flushFileSummary(TrackedFileSummary tfs)
    throws DatabaseException {

    if (!tfs.getAllowFlush()) {
        return;
    }
    putFileSummary(tfs);
}
/**
* Updates and stores the FileSummary for a given tracked file.
*/
private synchronized PackedOffsets putFileSummary(TrackedFileSummary tfs)
throws DatabaseException {
if (env.isReadOnly()) {
throw new DatabaseException
("Cannot write file summary in a read-only environment");
}
if (tfs.isEmpty()) {
return null; // no delta
}
if (!populateCache()) {
/* Db does not exist and this is a read-only environment. */
return null;
}
long fileNum = tfs.getFileNumber();
Long fileNumLong = new Long(fileNum);
/* Get existing file summary or create an empty one. */
FileSummary summary = (FileSummary) fileSummaryMap.get(fileNumLong);
if (summary == null) {
/*
* An obsolete node may have been counted after its file was
* deleted, for example, when compressing a BIN. Do not insert
* a new profile record if no corresponding log file exists.
*/
File file = new File
(env.getFileManager().getFullFileName
(fileNum, FileManager.JE_SUFFIX));
if (!file.exists()) {
return null;
}
summary = new FileSummary();
}
/*
* The key discriminator is a sequence that must be increasing over the
* life of the file. We use the sum of all entries counted. We must
* add the tracked and current summaries here to calculate the key.
*/
FileSummary tmp = new FileSummary();
tmp.add(summary);
tmp.add(tfs);
int sequence = tmp.getEntriesCounted();
/* Insert an LN with the existing and tracked summary info. */
FileSummaryLN ln = new FileSummaryLN(summary);
ln.setTrackedSummary(tfs);
insertFileSummary(ln, fileNum, sequence);
/*
 * Cache the updated summary object.  A null return from put means the
 * file was not previously cached, so charge the memory budget for the
 * new entry.
 */
summary = ln.getBaseSummary();
if (fileSummaryMap.put(fileNumLong, summary) == null) {
MemoryBudget mb = env.getMemoryBudget();
mb.updateMiscMemoryUsage
(MemoryBudget.UTILIZATION_PROFILE_ENTRY);
}
return ln.getObsoleteOffsets();
}
/**
* Returns the stored/packed obsolete offsets and the tracked obsolete
* offsets for the given file. The tracked summary object returned can be
* used to test for obsolete offsets that are being added during cleaning
* by other threads participating in lazy migration. The caller must call
* TrackedFileSummary.setAllowFlush(true) when cleaning is complete.
*/
TrackedFileSummary getObsoleteDetail(Long fileNum,
PackedOffsets packedOffsets)
throws DatabaseException {
/* Return if no detail is being tracked. */
if (!tracker.getTrackDetail()) {
return null;
}
boolean opened = populateCache();
assert opened;
long fileNumVal = fileNum.longValue();
/* Each element of the list is a long[] of offsets from one LN. */
List list = new ArrayList();
/*
* Get an unflushable summary that will remain valid for the duration
* of file cleaning.
*/
TrackedFileSummary tfs =
env.getLogManager().getUnflushableTrackedSummary(fileNumVal);
/* Read the summary db. */
Locker locker = null;
CursorImpl cursor = null;
try {
/*
* Preventing eviction/recursion is not necessary here because we
* are not synchronized on this object. Eviction may be beneficial
* since we are reading several largish records.
*/
locker = new BasicLocker(env);
cursor = new CursorImpl(fileSummaryDb, locker);
DatabaseEntry keyEntry = new DatabaseEntry();
DatabaseEntry dataEntry = new DatabaseEntry();
/* Search by file number. */
OperationStatus status = OperationStatus.SUCCESS;
if (!getFirstFSLN
(cursor, fileNumVal, keyEntry, dataEntry, LockType.NONE)) {
status = OperationStatus.NOTFOUND;
}
/* Read all LNs for this file number. */
while (status == OperationStatus.SUCCESS) {
FileSummaryLN ln = (FileSummaryLN)
cursor.getCurrentLN(LockType.NONE);
if (ln != null) {
/* Stop if the file number changes. */
if (fileNumVal != ln.getFileNumber(keyEntry.getData())) {
break;
}
PackedOffsets offsets = ln.getObsoleteOffsets();
if (offsets != null) {
list.add(offsets.toArray());
}
/* Always evict after using a file summary LN. */
cursor.evict();
}
status = cursor.getNext
(keyEntry, dataEntry, LockType.NONE,
true, // forward
false); // alreadyLatched
}
} finally {
if (cursor != null) {
cursor.releaseBINs();
cursor.close();
}
if (locker != null) {
locker.operationEnd();
}
}
/*
* Write out tracked detail, if any, and add its offsets to the list.
*/
if (!tfs.isEmpty()) {
PackedOffsets offsets = putFileSummary(tfs);
if (offsets != null) {
list.add(offsets.toArray());
}
}
/*
 * Merge all offsets into a single array and pack the result.  First
 * pass sizes the array, second pass copies the pieces.
 */
int size = 0;
for (int i = 0; i < list.size(); i += 1) {
long[] a = (long[]) list.get(i);
size += a.length;
}
long[] offsets = new long[size];
int index = 0;
for (int i = 0; i < list.size(); i += 1) {
long[] a = (long[]) list.get(i);
System.arraycopy(a, 0, offsets, index, a.length);
index += a.length;
}
assert index == offsets.length;
packedOffsets.pack(offsets);
return tfs;
}
/**
* Populate the profile for file selection.
*/
private synchronized boolean populateCache()
throws DatabaseException {
/* Do nothing if cache is already populated. */
if (cachePopulated) {
return true;
}
/* Open the file summary db on first use. */
if (!openFileSummaryDatabase()) {
/* Db does not exist and this is a read-only environment. */
return false;
}
/*
 * Remember the starting size so the memory budget can be adjusted by
 * the delta when the scan completes (see the finally block below).
 */
int oldMemorySize = fileSummaryMap.size() *
MemoryBudget.UTILIZATION_PROFILE_ENTRY;
/*
* It is possible to have an undeleted FileSummaryLN in the database
* for a deleted log file if we crash after deleting a file but before
* deleting the FileSummaryLN. Iterate through all FileSummaryLNs and
* add them to the cache if their corresponding log file exists. But
* delete those records that have no corresponding log file.
*/
Long[] existingFiles = env.getFileManager().getAllFileNumbers();
Locker locker = null;
CursorImpl cursor = null;
try {
locker = new BasicLocker(env);
cursor = new CursorImpl(fileSummaryDb, locker);
/* Prevent recursion when synchronized on this object. */
cursor.setAllowEviction(false);
DatabaseEntry keyEntry = new DatabaseEntry();
DatabaseEntry dataEntry = new DatabaseEntry();
if (cursor.positionFirstOrLast(true, null)) {
/* Retrieve the first record. */
OperationStatus status =
cursor.getCurrentAlreadyLatched(keyEntry, dataEntry,
LockType.NONE, true);
if (status != OperationStatus.SUCCESS) {
/* The record we're pointing at may be deleted. */
status = cursor.getNext(keyEntry, dataEntry, LockType.NONE,
true, // go forward
false); // do need to latch
}
while (status == OperationStatus.SUCCESS) {
FileSummaryLN ln = (FileSummaryLN)
cursor.getCurrentLN(LockType.NONE);
if (ln == null) {
/* Advance past a cleaned record. */
status = cursor.getNext
(keyEntry, dataEntry, LockType.NONE,
true, // go forward
false); // do need to latch
continue;
}
byte[] keyBytes = keyEntry.getData();
/* Old version records are identified by their string key. */
boolean isOldVersion = ln.hasStringKey(keyBytes);
long fileNum = ln.getFileNumber(keyBytes);
Long fileNumLong = new Long(fileNum);
if (Arrays.binarySearch(existingFiles, fileNumLong) >= 0) {
/* File exists, cache the FileSummaryLN. */
fileSummaryMap.put(fileNumLong, ln.getBaseSummary());
/*
* Update old version records to the new version. A
* zero sequence number is used to distinguish the
* converted records and to ensure that later records
* will have a greater sequence number.
*/
if (isOldVersion) {
insertFileSummary(ln, fileNum, 0);
cursor.latchBIN();
cursor.delete();
cursor.releaseBIN();
} else {
/* Always evict after using a file summary LN. */
cursor.evict();
}
} else {
/*
* File does not exist, remove the summary from the map
* and delete all FileSummaryLN records.
*/
fileSummaryMap.remove(fileNumLong);
if (isOldVersion) {
cursor.latchBIN();
cursor.delete();
cursor.releaseBIN();
} else {
deleteFileSummary(fileNumLong);
}
/*
* Do not evict after deleting since the compressor
* would have to fetch it again.
*/
}
/* Go on to the next entry. */
if (isOldVersion) {
/* Advance past the single old version record. */
status = cursor.getNext
(keyEntry, dataEntry, LockType.NONE,
true, // go forward
false); // do need to latch
} else {
/*
* Skip over other records for this file by adding one
* to the file number and doing a range search.
*/
if (!getFirstFSLN
(cursor,
fileNum + 1,
keyEntry, dataEntry,
LockType.NONE)) {
status = OperationStatus.NOTFOUND;
}
}
}
}
} finally {
if (cursor != null) {
cursor.releaseBINs();
cursor.close();
}
if (locker != null) {
locker.operationEnd();
}
/*
 * Charge the budget with the net change in cached entries, even if
 * the scan above failed part way through.
 */
int newMemorySize = fileSummaryMap.size() *
MemoryBudget.UTILIZATION_PROFILE_ENTRY;
MemoryBudget mb = env.getMemoryBudget();
mb.updateMiscMemoryUsage(newMemorySize - oldMemorySize);
}
cachePopulated = true;
return true;
}
/**
 * Positions at the first LN whose key is at or after the given file
 * number's partial key, via a range search by file number.
 */
private boolean getFirstFSLN(CursorImpl cursor,
long fileNum,
DatabaseEntry keyEntry,
DatabaseEntry dataEntry,
LockType lockType)
throws DatabaseException {
/*
 * A partial key contains only the file number, so SET_RANGE positions
 * at the first record whose key is >= that file number.
 */
byte[] keyBytes = FileSummaryLN.makePartialKey(fileNum);
keyEntry.setData(keyBytes);
int result = cursor.searchAndPosition(keyEntry,
dataEntry,
SearchMode.SET_RANGE,
lockType);
if ((result & CursorImpl.FOUND) == 0) {
return false;
}
boolean exactKeyMatch = ((result & CursorImpl.EXACT_KEY) != 0);
/* On an exact match, use the record unless it is deleted (KEYEMPTY). */
if (exactKeyMatch &&
cursor.getCurrentAlreadyLatched
(keyEntry, dataEntry, lockType, true) !=
OperationStatus.KEYEMPTY) {
return true;
}
/*
 * Otherwise advance to the next record.  NOTE(review): alreadyLatched
 * is !exactKeyMatch, presumably because the getCurrentAlreadyLatched
 * probe above releases the BIN latch when it runs -- confirm.
 */
OperationStatus status = cursor.getNext
(keyEntry, dataEntry, lockType,
true, // forward
!exactKeyMatch); // alreadyLatched
return status == OperationStatus.SUCCESS;
}
/**
* If the file summary db is already open, return, otherwise attempt to
* open it. If the environment is read-only and the database doesn't
* exist, return false. If the environment is read-write the database will
* be created if it doesn't exist.
*/
/**
 * Opens the file summary db if it is not already open.  Returns false
 * only when the db does not exist and the environment is read-only;
 * in a read-write environment the db is created on demand.
 */
private synchronized boolean openFileSummaryDatabase()
    throws DatabaseException {

    /* Already open -- nothing to do. */
    if (fileSummaryDb != null) {
        return true;
    }

    DbTree dbTree = env.getDbMapTree();
    Locker autoTxn = null;
    boolean operationOk = false;
    try {
        /* This method is used during eviction -- don't recurse. */
        autoTxn = new AutoTxn(env, new TransactionConfig());
        DatabaseImpl db = dbTree.getDb
            (autoTxn, DbTree.UTILIZATION_DB_NAME, null,
             false /* No eviction allowed. */);

        if (db == null && env.isReadOnly()) {
            /* Cannot create the db in a read-only environment. */
            return false;
        }
        if (db == null) {
            db = dbTree.createDb
                (autoTxn, DbTree.UTILIZATION_DB_NAME,
                 new DatabaseConfig(), null,
                 false /* No eviction allowed */);
        }
        fileSummaryDb = db;
        operationOk = true;
        return true;
    } finally {
        if (autoTxn != null) {
            autoTxn.operationEnd(operationOk);
        }
    }
}
/**
* Insert the given LN with the given key values.
*/
/**
 * Inserts the given LN under a full key built from the file number and
 * sequence.  A duplicate key should be impossible because the sequence
 * increases over the life of the file; if one occurs it is logged as
 * SEVERE rather than failing the operation.
 */
private void insertFileSummary(FileSummaryLN ln,
                               long fileNum,
                               int sequence)
    throws DatabaseException {

    byte[] keyBytes = FileSummaryLN.makeFullKey(fileNum, sequence);

    Locker locker = null;
    CursorImpl cursor = null;
    try {
        locker = new BasicLocker(env);
        cursor = new CursorImpl(fileSummaryDb, locker);
        /* Prevent recursion when synchronized on this object. */
        cursor.setAllowEviction(false);

        if (cursor.putLN(keyBytes, ln, false) ==
            OperationStatus.KEYEXIST) {
            String msg =
                "Cleaner duplicate key sequence file=0x" +
                Long.toHexString(fileNum) + " sequence=0x" +
                Long.toHexString(sequence);
            env.getLogger().log(Level.SEVERE, msg);
        }

        /* Always evict after using a file summary LN. */
        cursor.evict();
    } finally {
        if (cursor != null) {
            cursor.close();
        }
        if (locker != null) {
            locker.operationEnd();
        }
    }
}
/**
 * Checks that all FSLN offsets are indeed obsolete. Assumes that the
 * system is quiescent (does not lock LNs). This method is not synchronized
 * because it doesn't access fileSummaryMap.
 *
 * @return true if no verification failures.
 */
public boolean verifyFileSummaryDatabase()
    throws DatabaseException {

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    openFileSummaryDatabase();
    Locker locker = null;
    CursorImpl cursor = null;
    boolean ok = true;
    try {
        locker = new BasicLocker(env);
        cursor = new CursorImpl(fileSummaryDb, locker);
        if (cursor.positionFirstOrLast(true, null)) {
            OperationStatus status = cursor.getCurrentAlreadyLatched
                (key, data, LockType.NONE, true);
            /* Iterate over all file summary lns. */
            while (status == OperationStatus.SUCCESS) {
                FileSummaryLN ln = (FileSummaryLN)
                    cursor.getCurrentLN(LockType.NONE);

                /*
                 * BUG FIX: ln is null for a cleaned record.  The cursor
                 * advance below was previously inside the (ln != null)
                 * branch, so hitting a cleaned record left status as
                 * SUCCESS forever and spun in an infinite loop.  Now we
                 * always advance, as populateCache does.
                 */
                if (ln != null) {
                    long fileNumVal = ln.getFileNumber(key.getData());
                    PackedOffsets offsets = ln.getObsoleteOffsets();

                    /*
                     * Check every offset in the fsln to make sure it's
                     * truly obsolete.
                     */
                    if (offsets != null) {
                        long[] vals = offsets.toArray();
                        for (int i = 0; i < vals.length; i++) {
                            long lsn = DbLsn.makeLsn(fileNumVal, vals[i]);
                            if (!verifyLsnIsObsolete(lsn)) {
                                ok = false;
                            }
                        }
                    }

                    /* Always evict after using a file summary LN. */
                    cursor.evict();
                }

                status = cursor.getNext(key, data, LockType.NONE,
                                        true,   // forward
                                        false); // already latched
            }
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
        if (locker != null) {
            locker.operationEnd();
        }
    }
    return ok;
}
/*
* Return true if the LN at this lsn is obsolete.
*/
private boolean verifyLsnIsObsolete(long lsn)
throws DatabaseException {
/* Read the whole entry out of the log. */
Object o = env.getLogManager().getLogEntry(lsn);
/* Non-LN entries are not tracked here; treat them as obsolete. */
if (!(o instanceof LNLogEntry)) {
return true;
}
LNLogEntry entry = (LNLogEntry)o;
/* All deleted LNs are obsolete. */
if (entry.getLN().isDeleted()) {
return true;
}
/* Find the owning database. */
DatabaseId dbId = entry.getDbId();
DatabaseImpl db = env.getDbMapTree().getDb(dbId);
/* The whole database is gone, so this LN is obsolete. */
if (db == null || db.getIsDeleted()) {
return true;
}
/*
* Search down to the bottom most level for the parent of this LN.
*/
BIN bin = null;
try {
Tree tree = db.getTree();
TreeLocation location = new TreeLocation();
boolean parentFound = tree.getParentBINForChildLN
(location,
entry.getKey(),
entry.getDupKey(),
entry.getLN(),
false, // splitsAllowed
true, // findDeletedEntries
false, // searchDupTree ???
false); // updateGeneration
bin = location.bin;
int index = location.index;
/* NOTE(review): is bin latched here?  The finally block assumes a
latch is held whenever bin is non-null -- confirm. */
if (!parentFound) {
return true;
}
/*
* Now we're at the parent for this LN, whether BIN, DBIN or DIN.
* If knownDeleted, LN is deleted and can be purged.
*/
if (bin.isEntryKnownDeleted(index)) {
return true;
}
/* A different LSN in the slot means this LN was superseded. */
if (bin.getLsn(index) != lsn) {
return true;
}
/* Oh no -- this lsn is in the tree. */
/* should print, or trace? */
System.err.println("lsn " + DbLsn.getNoFormatString(lsn)+
" was found in tree.");
return false;
} finally {
if (bin != null) {
bin.releaseLatch();
}
}
}
}