checkState(false);
try {
long commitLsn = DbLsn.NULL_LSN;
synchronized (this) {
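/*
* Transfer handle locks, log the commit record and release all
* locks while synchronized on this txn.
*/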
if (checkCursorsForClose()) {
throw new DatabaseException
("Transaction " + id +
" commit failed because there were open cursors.");
}
/* Transfer handle locks to their owning handles. */
if (handleLockToHandleMap != null) {
Iterator handleLockIter =
handleLockToHandleMap.entrySet().iterator();
while (handleLockIter.hasNext()) {
Map.Entry entry = (Map.Entry) handleLockIter.next();
transferHandleLockToHandleSet((Long) entry.getKey(),
(Set) entry.getValue());
}
}
LogManager logManager = envImpl.getLogManager();
/*
* Release all read locks, clear lock collection. Optimize for
* the case where there are no read locks.
*/
int numReadLocks = clearReadLocks();
/*
* Log the commit if we ever held any write locks. Note that
* with dbhandle write locks, we may have held the write lock
* but then had it transferred away.
*/
int numWriteLocks = 0;
if (writeInfo != null) {
numWriteLocks = writeInfo.size();
TxnCommit commitRecord =
new TxnCommit(id, lastLoggedLsn);
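/*
* Write the commit record to the log, honoring the flush/sync
* behavior requested for this txn.
*/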
if (flushSyncBehavior == TXN_SYNC) {
/* Flush and sync required. */
commitLsn = logManager.
logForceFlush(commitRecord, true);
} else if (flushSyncBehavior == TXN_WRITE_NOSYNC) {
/* Flush but no sync required. */
commitLsn = logManager.
logForceFlush(commitRecord, false);
} else {
/* No flush, no sync required. */
commitLsn = logManager.log(commitRecord);
}
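/* commitLsn now refers to the logged commit record. */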
/*
* Used to prevent double counting abortLSNs if there is
* more than one node with the same abortLSN in this txn.
* Two nodes with the same abortLSN occur when a deleted
* slot is reused in the same txn.
*/
Set alreadyCountedLsnSet = new HashSet();
/* Release all write locks, clear lock collection. */
Iterator iter = writeInfo.values().iterator();
while (iter.hasNext()) {
WriteLockInfo info = (WriteLockInfo) iter.next();
lockManager.release(info.lock, this);
/*
* Count the abortLSN as obsolete. Do not count if a
* slot with a deleted LN was reused
* (abortKnownDeleted), to avoid double counting.
*/
if (info.abortLsn != DbLsn.NULL_LSN &&
!info.abortKnownDeleted) {
Long longLsn = new Long(info.abortLsn);
if (!alreadyCountedLsnSet.contains(longLsn)) {
logManager.countObsoleteNode
(info.abortLsn, null);
alreadyCountedLsnSet.add(longLsn);
}
}
}
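/* All write locks have been released; drop the collection. */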
writeInfo = null;
/* Unload delete info, but don't wake up the compressor. */
if ((deleteInfo != null) && deleteInfo.size() > 0) {
envImpl.addToCompressorQueue(deleteInfo.values(),
false); // don't wakeup
deleteInfo.clear();
}
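/* Perform commit-time cleanup of DatabaseImpls associated with this txn. */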
cleanupDatabaseImpls(true);
}
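/* Trace the commit along with the write and read lock counts. */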
traceCommit(numWriteLocks, numReadLocks);
}
/*
* Unregister this txn. Be sure to do this outside the
* synchronization block, to avoid conflict with the checkpointer.
*/
close(true);
return commitLsn;
} catch (RunRecoveryException e) {
/* May have received a thread interrupt; rethrow without attempting the abort below. */
throw e;
} catch (Throwable t) {
try {
/*
* If the exception thrown is a DatabaseException, it indicates
* that the write() call hit an IOException, probably from
* running out of disk space, and already attempted to rewrite
* all commit records as abort records. Since the abort records
* have already been rewritten (or the rewrite was at least
* attempted), there is no reason for abort to write an abort
* record again. See [11271].
*/
abortInternal(flushSyncBehavior == TXN_SYNC,
!(t instanceof DatabaseException));
Tracer.trace(envImpl, "Txn", "commit",
"Commit of transaction " + id + " failed", t);
} catch (Throwable abortT2) {
throw new DatabaseException
("Failed while attempting to commit transaction " +
id +
". The attempt to abort and clean up also failed. " +
"The original exception seen from commit = " +
t.getMessage() +
" The exception from the cleanup = " +
abortT2.getMessage(),
t);
}
/* Now throw an exception that shows the commit problem. */
throw new DatabaseException
("Failed while attempting to commit transaction " + id +
", aborted instead. Original exception = " +
t.getMessage(), t);
}
}