// NOTE(review): this is the interior of a larger method (the enclosing
// signature is not visible in this chunk) -- presumably the write-ahead-log
// "logAndDo" path: serialize the operation into a log record, append it to
// the log stream, then apply it via doMe.  Confirm against the full file.

// Reuse the serialization buffer for this record.
logOutputBuffer.reset();

// always use the short Id, only the BeginXact log record contains
// the XactId (long form)
TransactionId transactionId = xact.getId();

// write out the log header with the operation embedded
// this is by definition not a compensation log record,
// those are called thru the logAndUndo interface
logRecord.setValue(transactionId, operation);

// Bracket user-supplied serialization: writeObject may run arbitrary
// writeExternal/writeObject code in the operation.  inUserCode is
// presumably inspected by an outer exception handler to attribute
// failures -- not visible in this chunk, verify in the full method.
inUserCode = true;
logicalOut.writeObject(logRecord);
inUserCode = false;

int optionalDataLength = 0;
int optionalDataOffset = 0;
int completeLength = 0;

// Optional payload the operation prepared ahead of time (appended to
// the log record after the serialized header).
ByteArray preparedLogArray = operation.getPreparedLog();
if (preparedLogArray != null) {
preparedLog = preparedLogArray.getArray();
optionalDataLength = preparedLogArray.getLength();
optionalDataOffset = preparedLogArray.getOffset();

// There is a race condition if the operation is a begin tran in
// that between the time the beginXact log record is written to
// disk and the time the transaction object is updated in the
// beginXact.doMe method, other log records may be written.
// This will render the transaction table in an inconsistent state
// since it may think a later transaction is the earliest
// transaction or it may think that there is no active transactions
// where there is a bunch of them sitting on the log.
//
// Similarly, there is a race condition for endXact, i.e.,
// 1) endXact is written to the log,
// 2) checkpoint gets that (committed) transaction as the
// firstUpdateTransaction
// 3) the transaction calls postComplete, nulling out itself
// 4) checkpoint tries to access a closed transaction object
//
// The solution is to sync between the time a begin tran or end
// tran log record is sent to the log stream and its doMe method is
// called to update the transaction table and in memory state
//
// We only need to serialize the begin and end Xact log records
// because once a transaction has been started and is in the
// transaction table, its order and transaction state does not
// change.
//
// Use the logFactory as the sync object so that a checkpoint can
// take its snapshot of the undoLWM before or after a transaction
// is started, but not in the middle. (see LogToFile.checkpoint)
//
// (The synchronized (logFactory) block further below, taken for
// FIRST/LAST group records, is what implements this.)
//
// now set the input limit to be the optional data.
// This limits the amount of data available to logIn that doMe can
// use
logIn.setData(preparedLog);
logIn.setPosition(optionalDataOffset);
logIn.setLimit(optionalDataLength);

if (SanityManager.DEBUG)
{
if ((optionalDataLength) != logIn.available())
SanityManager.THROWASSERT(
" stream not set correctly " +
optionalDataLength + " != " +
logIn.available());
}
} else {
// No optional data for this operation.
preparedLog = null;
optionalDataLength = 0;
}

// Record the optional-data length in the serialized header so recovery
// knows how many trailing bytes belong to the payload.
logicalOut.writeInt(optionalDataLength);

// Total on-log size: serialized header bytes plus the optional payload.
completeLength = logOutputBuffer.getPosition() + optionalDataLength;

LogInstant logInstant = null;
int encryptedLength = 0; // in case of encryption, we need to pad

try
{
if (logFactory.databaseEncrypted())
{
// we must pad the encryption data to be a multiple of the block
// size, which is logFactory.getEncryptionBlockSize()
encryptedLength = completeLength;
if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
encryptedLength = encryptedLength + logFactory.getEncryptionBlockSize() - (encryptedLength % logFactory.getEncryptionBlockSize());

// Grow the reusable encryption buffer only when too small.
if (encryptionBuffer == null ||
encryptionBuffer.length < encryptedLength)
encryptionBuffer = new byte[encryptedLength];

// Gather header then payload contiguously before encrypting.
System.arraycopy(logOutputBuffer.getByteArray(), 0,
encryptionBuffer, 0, completeLength-optionalDataLength);

if (optionalDataLength > 0)
System.arraycopy(preparedLog, optionalDataOffset,
encryptionBuffer,
completeLength-optionalDataLength, optionalDataLength);

// do not bother to clear out the padding area (padding bytes may
// contain stale data from a previous record)
int len =
logFactory.encrypt(encryptionBuffer, 0, encryptedLength,
encryptionBuffer, 0);

if (SanityManager.DEBUG)
SanityManager.ASSERT(len == encryptedLength,
"encrypted log buffer length != log buffer len");
}

// FIRST/LAST group bits mark begin/end Xact records; these must be
// appended and applied atomically w.r.t. checkpoint (see the long
// race-condition comment earlier in this method).
if ((operation.group() & (Loggable.FIRST | Loggable.LAST)) != 0)
{
synchronized (logFactory)
{
long instant = 0;

if (logFactory.databaseEncrypted())
{
// encryption has completely drained both the
// logOuputBuffer array and the preparedLog array,
// so the payload is passed as part of the single buffer
instant = logFactory.
appendLogRecord(encryptionBuffer, 0,
encryptedLength, null,
-1, 0);
}
else
{
instant = logFactory.
appendLogRecord(logOutputBuffer.getByteArray(),
0, completeLength, preparedLog,
optionalDataOffset,
optionalDataLength);
}

// Apply the operation while still holding the lock so the
// transaction table update is atomic with the append.
logInstant = new LogCounter(instant);
operation.doMe(xact, logInstant, logIn);
}
}
else
{
// Ordinary (mid-transaction) record: no synchronization needed,
// ordering/state of an in-flight transaction does not change.
long instant = 0;

if (logFactory.databaseEncrypted())
{
// encryption has completely drained both the
// logOuputBuffer array and the preparedLog array
instant = logFactory.
appendLogRecord(encryptionBuffer, 0,
encryptedLength, null, -1, 0);
}
else
{
instant = logFactory.
appendLogRecord(logOutputBuffer.getByteArray(), 0,
completeLength, preparedLog,
optionalDataOffset,
optionalDataLength);
}

logInstant = new LogCounter(instant);
operation.doMe(xact, logInstant, logIn);
}

}
catch (StandardException se)
{
// A failed doMe leaves the log and in-memory state inconsistent:
// mark the whole log corrupt (markCorrupt presumably returns the
// exception to throw -- confirm against LogToFile).
throw logFactory.markCorrupt(
StandardException.newException(
SQLState.LOG_DO_ME_FAIL, se, operation));
}
catch (IOException ioe)
{
throw logFactory.markCorrupt(
StandardException.newException(
SQLState.LOG_DO_ME_FAIL, ioe, operation));
}
finally
{
// Always restore logIn so later users are not limited to this
// record's optional data.
logIn.clearLimit();
}

if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
{
SanityManager.DEBUG(
LogToFile.DBG_FLAG,
"Write log record: tranId=" + transactionId.toString() +
" instant: " + logInstant.toString() + " length: " +
completeLength + "\n" + operation + "\n");
}
}

return logInstant;