redoScan = (StreamLogScan)
openForwardsScan(start, (LogInstant)null);
}
// open a transaction that is used for redo and rollback
RawTransaction recoveryTransaction =
tf.startTransaction(
rsf,
ContextService.getFactory().getCurrentContextManager(),
AccessFactoryGlobals.USER_TRANS_NAME);
// make this transaction aware that it is a recovery transaction
// so it does not generate post-commit work while replaying the log
recoveryTransaction.recoveryTransaction();
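// this one transaction is reused for both the redo pass and the
// undo (rollback) pass below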
/////////////////////////////////////////////////////////////
//
// Redo loop - in FileLogger
//
/////////////////////////////////////////////////////////////
//
// set log factory state to inRedo so that if redo causes any
// dirty page to be written from the cache, it won't flush the
// log, since the end of the log has not yet been determined and
// we know the log record that caused the page to change has
// already been written to the log. The page write still needs to
// go through the log factory because, if redo hits a problem,
// the log factory is corrupt, and the only way we know not to
// write out the page in a checkpoint is by checking with the
// log factory, which is done via a flush - we use the WAL
// protocol to stop corrupt pages from being written to disk.
//
inRedo = true;
long logEnd =
logger.redo(
recoveryTransaction, tf, redoScan, redoLWM,
ttabInstant);
inRedo = false;
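// logEnd is the instant just past the last complete log record
// that redo processed, or LogCounter.INVALID_LOG_INSTANT if no
// log record was found at all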
// if we are only interested in dumping the log, don't alter
// the database, and prevent anyone from using the log
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
{
Monitor.logMessage("_____________________________________________________");
Monitor.logMessage("\n\t\t Log dump finished");
Monitor.logMessage("_____________________________________________________");
// logOut has not been set yet anyway; clear it just in case
logOut = null;
return;
}
}
/////////////////////////////////////////////////////////////
//
// determine where the log ends
//
/////////////////////////////////////////////////////////////
StorageRandomAccessFile theLog = null;
// if logEnd == LogCounter.INVALID_LOG_INSTANT, that means there
// is no log record in the log - most likely it is corrupted in
// some way ...
if (logEnd == LogCounter.INVALID_LOG_INSTANT)
{
Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND);
StorageFile logFile = getLogFileName(logFileNumber);
if (privExists(logFile))
{
// if we can delete this strange corrupted file, do so;
// otherwise, skip it and move on to the next log file number
if (!privDelete(logFile))
{
logFile = getLogFileName(++logFileNumber);
}
}
try
{
theLog = privRandomAccessFile(logFile, "rw");
}
catch (IOException ioe)
{
theLog = null;
}
if (theLog == null || !privCanWrite(logFile))
{
if (theLog != null)
theLog.close();
theLog = null;
ReadOnlyDB = true;
}
else
{
try
{
// no previous log file or previous log position
if (!initLogFile(
theLog, logFileNumber,
LogCounter.INVALID_LOG_INSTANT))
{
throw markCorrupt(
StandardException.newException(
SQLState.LOG_SEGMENT_NOT_EXIST,
logFile.getPath()));
}
}
catch (IOException ioe)
{
throw markCorrupt(
StandardException.newException(
SQLState.LOG_IO_ERROR, ioe));
}
// successfully init'd the log file - set up markers,
// and position at the end of the log.
endPosition = theLog.getFilePointer();
lastFlush = endPosition;
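// for a brand new log file, endPosition and lastFlush both sit
// just past the log file header (LOG_FILE_HEADER_SIZE)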
// if write sync is true, preallocate the log file
// and reopen the file in "rws" mode.
if (isWriteSynced)
{
// extend the file by writing zeros to it
preAllocateNewLogFile(theLog);
theLog.close();
theLog = openLogFileInWriteMode(logFile);
// position the log at the current end position
theLog.seek(endPosition);
}
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
endPosition == LOG_FILE_HEADER_SIZE,
"empty log file has wrong size");
}
// because we have already incremented the log file number
// here, no special log switch is required for
// backup recoveries.
logSwitchRequired = false;
}
}
else
{
// logEnd is the instant of the next log record in the log;
// it is used to determine the last known good position of
// the log
logFileNumber = LogCounter.getLogFileNumber(logEnd);
ReadOnlyDB = df.isReadOnly();
StorageFile logFile = getLogFileName(logFileNumber);
if (!ReadOnlyDB)
{
// if the dataFactory doesn't think it is read-only, we can
// do some further tests of our own
try
{
if (isWriteSynced)
theLog = openLogFileInWriteMode(logFile);
else
theLog = privRandomAccessFile(logFile, "rw");
}
catch (IOException ioe)
{
theLog = null;
}
if (theLog == null || !privCanWrite(logFile))
{
if (theLog != null)
theLog.close();
theLog = null;
ReadOnlyDB = true;
}
}
if (!ReadOnlyDB)
{
endPosition = LogCounter.getLogFilePosition(logEnd);
//
// The end of the log is at endPosition, which is where the
// next log record should be appended.
//
// if the last log record ends before the end of the
// log file, then this log file has a fuzzy end.
// Zap all the bytes from endPosition to EOF to 0.
//
// the end-of-log marker is 4 bytes (of zeros)
//
// if endPosition + 4 == logOut.length, we have a
// properly terminated log file
//
// if endPosition + 4 is > logOut.length, there are 0,
// 1, 2, or 3 bytes of 'fuzz' at the end of the log. We
// can ignore that because it is guaranteed to be
// overwritten by the next log record.
//
// if endPosition + 4 is < logOut.length, we have a
// partial log record at the end of the log.
//
// We need to overwrite all of the incomplete log
// record. If we start logging again but cannot
// 'consume' all the bad bytes, the log will truly be
// corrupted: if the next 4 bytes (read as the length of
// the log record) happen to hold a small enough value,
// the next time the database is recovered it will be
// interpreted as though the whole log record is in the
// log, and recovery will try to objectify it, only to
// get a ClassNotFound error or worse.
//
// find out if the log has incomplete log records at the end.
if (redoScan.isLogEndFuzzy())
{
theLog.seek(endPosition);
long eof = theLog.length();
Monitor.logTextMessage(MessageId.LOG_INCOMPLETE_LOG_RECORD,
logFile, new Long(endPosition), new Long(eof));
/* Write zeros from incomplete log record to end of file */
// split the zero-fill into full buffer-sized writes plus a remainder
long nWrites = (eof - endPosition) / logBufferSize;
int rBytes = (int) ((eof - endPosition) % logBufferSize);
byte[] zeroBuf = new byte[logBufferSize];
// write the zeros to the file
while (nWrites-- > 0)
theLog.write(zeroBuf);
if (rBytes != 0)
theLog.write(zeroBuf, 0, rBytes);
if (!isWriteSynced)
syncFile(theLog);
}
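// once the tail is zeroed, anything read at or past endPosition
// looks like the 4-byte end-of-log marker described above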
if (SanityManager.DEBUG)
{
if (theLog.length() != endPosition)
{
SanityManager.ASSERT(
theLog.length() > endPosition,
"log end > log file length, bad scan");
}
}
// set the log to the true end position,
// and not the end of the file
lastFlush = endPosition;
theLog.seek(endPosition);
}
}
if (theLog != null)
logOut = new LogAccessFile(this, theLog, logBufferSize);
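// logOut stays null for a read-only database, since theLog is
// never opened for writing in that case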
if (logSwitchRequired)
switchLogFile();
boolean noInFlightTransactions = tf.noActiveUpdateTransaction();
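// noInFlightTransactions is true when redo left no active update
// transactions in the transaction table; it drives both the
// read-only check and the undo pass below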
if (ReadOnlyDB)
{
// in the unlikely event that someone detects we are
// dealing with a read-only db, check to make sure the
// database was quiescent when it was copied, with no
// unflushed dirty buffers
if (!noInFlightTransactions)
{
throw StandardException.newException(
SQLState.LOG_READ_ONLY_DB_NEEDS_UNDO);
}
}
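// a read-only database cannot be modified, so recovery cannot
// roll anything back; the copy must already be transaction
// consistent, which is what the check above enforces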
/////////////////////////////////////////////////////////////
//
// Undo loop - in transaction factory. It just gets one
// transaction at a time from the transaction table and calls
// undo, no different from runtime.
//
/////////////////////////////////////////////////////////////
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(LogToFile.DBG_FLAG,
"About to call undo(), transaction table =" +
tf.getTransactionTable());
}
if (!noInFlightTransactions)
{
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(LogToFile.DBG_FLAG,
"In recovery undo, rollback inflight transactions");
}
tf.rollbackAllTransactions(recoveryTransaction, rsf);
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(
LogToFile.DBG_FLAG, "finish recovery undo,");
}
}
else
{
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(LogToFile.DBG_FLAG,
"No in flight transaction, no recovery undo work");
}
}
/////////////////////////////////////////////////////////////
//
// XA prepared xact loop - in transaction factory. At this
// point only prepared transactions should be left in the
// transaction table, all others should have been aborted or
// committed and removed from the transaction table. It just
// gets one transaction at a time from the transaction table,
// creates a real context and transaction, reclaims locks,
// and leaves the new xact in the transaction table.
//
/////////////////////////////////////////////////////////////
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(LogToFile.DBG_FLAG,
"About to call rePrepare(), transaction table =" +
tf.getTransactionTable());
}
tf.handlePreparedXacts(rsf);
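// for each prepared transaction still in the table, this rebuilds
// a real transaction and reclaims its locks, as described in the
// comment block above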
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(LogToFile.DBG_FLAG,
"Finished rePrepare(), transaction table =" +
tf.getTransactionTable());
}
/////////////////////////////////////////////////////////////
//
// End of recovery.
//
/////////////////////////////////////////////////////////////
// recovery is finished. Close the transaction
recoveryTransaction.close();
// notify the dataFactory that recovery is completed,
// but before the checkpoint is written.
dataFactory.postRecovery();