// Method signature reconstructed from the fields used in the body; parameter names are assumed.
private void loadPreparedTransaction(final PostOffice postOffice,
                                     final PagingManager pagingManager,
                                     final ResourceManager resourceManager,
                                     final Map<Long, Queue> queues,
                                     final Map<Long, QueueBindingInfo> queueInfos,
                                     final PreparedTransactionInfo preparedTransaction,
                                     final Map<Long, PageSubscription> pageSubscriptions,
                                     final Set<Pair<Long, Long>> pendingLargeMessages) throws Exception
{
XidEncoding encodingXid = new XidEncoding(preparedTransaction.extraData);
Xid xid = encodingXid.xid;
Transaction tx = new TransactionImpl(preparedTransaction.id, xid, this);
List<MessageReference> referencesToAck = new ArrayList<MessageReference>();
Map<Long, ServerMessage> messages = new HashMap<Long, ServerMessage>();
// Use the same method as the message journal load to prune out acks, so they don't get added.
// Then call reacknowledge(tx) on each queue, which needs to add the page size.
// First, recreate any messages sent within this tx.
for (RecordInfo record : preparedTransaction.records)
{
byte[] data = record.data;
HornetQBuffer buff = HornetQBuffers.wrappedBuffer(data);
byte recordType = record.getUserRecordType();
switch (recordType)
{
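// A large message added inside the prepared TX; rebuild it from its journal record.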
case JournalRecordIds.ADD_LARGE_MESSAGE:
{
messages.put(record.id, parseLargeMessage(messages, buff));
break;
}
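// A regular message added inside the prepared TX; decode it and keep it so the
// ADD_REF records below can route it.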
case JournalRecordIds.ADD_MESSAGE:
{
ServerMessage message = new ServerMessageImpl(record.id, 50); // 50 = initial encode-buffer size estimate
message.decode(buff);
messages.put(record.id, message);
break;
}
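// A reference added to a queue inside the TX; reroute the message through the
// post office under the recreated transaction.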
case JournalRecordIds.ADD_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Queue queue = queues.get(encoding.queueID);
if (queue == null)
{
HornetQServerLogger.LOGGER.journalMessageInPreparedTX(encoding.queueID);
}
else
{
ServerMessage message = messages.get(messageID);
if (message == null)
{
throw new IllegalStateException("Cannot find message with id " + messageID);
}
postOffice.reroute(message, queue, tx);
}
break;
}
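// An ack performed inside the prepared TX; remove the reference from the queue
// now and re-apply the ack under the TX at the end (see referencesToAck).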
case JournalRecordIds.ACKNOWLEDGE_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Queue queue = queues.get(encoding.queueID);
if (queue == null)
{
throw new IllegalStateException("Cannot find queue with id " + encoding.queueID);
}
MessageReference removed = queue.removeReferenceWithID(messageID);
if (removed == null)
{
HornetQServerLogger.LOGGER.journalErrorRemovingRef(messageID);
}
else
{
referencesToAck.add(removed);
}
break;
}
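// Page-transaction bookkeeping for messages sent while the address was paging.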
case JournalRecordIds.PAGE_TRANSACTION:
{
PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
pageTransactionInfo.decode(buff);
if (record.isUpdate)
{
PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
}
else
{
pageTransactionInfo.setCommitted(false);
tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
pagingManager.addTransaction(pageTransactionInfo);
tx.addOperation(new FinishPageMessageOperation());
}
break;
}
case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME:
{
// Do nothing - for prepared TXs the scheduled delivery time is only set as part of a send,
// in which case the message already carries the scheduled-delivery header, so there is
// nothing more to do here.
break;
}
case JournalRecordIds.DUPLICATE_ID:
{
// The duplicate IDs need to be loaded at prepare time too.
DuplicateIDEncoding encoding = new DuplicateIDEncoding();
encoding.decode(buff);
DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
cache.load(tx, encoding.duplID);
break;
}
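// An ack of a paged message, identified by its page-cursor position.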
case JournalRecordIds.ACKNOWLEDGE_CURSOR:
{
CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
encoding.decode(buff);
encoding.position.setRecordID(record.id);
PageSubscription sub = locateSubscription(encoding.queueID,
pageSubscriptions,
queueInfos,
pagingManager);
if (sub != null)
{
sub.reloadPreparedACK(tx, encoding.position);
referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
}
else
{
HornetQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
}
break;
}
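// A page-counter snapshot is not expected inside a prepared TX; log and ignore it.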
case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE:
{
HornetQServerLogger.LOGGER.journalPAGEOnPrepared();
break;
}
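// Replay a page-counter increment under the recreated transaction.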
case JournalRecordIds.PAGE_CURSOR_COUNTER_INC:
{
PageCountRecordInc encoding = new PageCountRecordInc();
encoding.decode(buff);
PageSubscription sub = locateSubscription(encoding.queueID,
pageSubscriptions,
queueInfos,
pagingManager);
if (sub != null)
{
sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.value);
sub.notEmpty();
}
else
{
HornetQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
}
break;
}
default:
{
HornetQServerLogger.LOGGER.journalInvalidRecordType(recordType);
}
}
}
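// Now process the deletes recorded as part of the prepared TX.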
for (RecordInfo recordDeleted : preparedTransaction.recordsToDelete)
{
byte[] data = recordDeleted.data;
if (data.length > 0)
{
HornetQBuffer buff = HornetQBuffers.wrappedBuffer(data);
byte b = buff.readByte();
switch (b)
{
case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING:
{
long messageID = buff.readLong();
if (!pendingLargeMessages.remove(new Pair<Long, Long>(recordDeleted.id, messageID)))
{
// TODO: replace this raw warn with a dedicated HornetQServerLogger message
HornetQServerLogger.LOGGER.warn("Large message " + recordDeleted.id +
                                " wasn't found when processing an add-pending-large-message record");
}
installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
break;
}
default:
HornetQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
}
}
}
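// Re-apply every ack collected above under the recreated transaction.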
for (MessageReference ack : referencesToAck)
{
ack.getQueue().reacknowledge(tx, ack);
}
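// Leave the TX in the prepared state and register it with the resource manager
// so XA recovery can commit or roll it back.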
tx.setState(Transaction.State.PREPARED);
resourceManager.putTransaction(xid, tx);
}
}