LeftTupleSink sink,
BetaMemory bm,
InternalWorkingMemory wm,
RightTupleSets srcRightTuples,
LeftTupleSets trgLeftTuples) {
// NOTE(review): method fragment — the signature opens before this view and the
// body continues past it. Processes the staged DELETE list of right tuples for a
// beta node (`notNode`; presumably a NOT node in the Phreak network — confirm
// against the enclosing class). For each deleted right tuple it removes the tuple
// from right memory, then tries to find a replacement blocker for every left
// tuple the deleted tuple was blocking; left tuples left unblocked are re-added
// to left memory and a child insert is staged into trgLeftTuples.
LeftTupleMemory ltm = bm.getLeftTupleMemory();
RightTupleMemory rtm = bm.getRightTupleMemory();
ContextEntry[] contextEntry = bm.getContext();
BetaConstraints constraints = notNode.getRawConstraints();
// Iterate staged deletes; advance via getStagedNext (the `rightTuple = next`
// step is presumably at the end of this loop, below the visible fragment).
for (RightTuple rightTuple = srcRightTuples.getDeleteFirst(); rightTuple != null; ) {
RightTuple next = rightTuple.getStagedNext();
FastIterator it = notNode.getRightIterator(rtm);
// assign now, so we can remove from memory before doing any possible propagations
// For non-comparison indexes the successor of the deleted tuple is a valid
// starting point for the blocker search; comparison indexes need a per-left-tuple
// restart via rtm.getFirst(...) (see below), so rootBlocker stays null here.
boolean useComparisonIndex = rtm.getIndexType().isComparison();
RightTuple rootBlocker = useComparisonIndex ? null : (RightTuple) it.next(rightTuple);
if (rightTuple.getMemory() != null) {
// it may have been staged and never actually added
rtm.remove(rightTuple);
}
if (rightTuple.getBlocked() != null) {
// Walk the chain of left tuples this right tuple was blocking; each needs a
// new blocker or, failing that, a propagation.
for (LeftTuple leftTuple = rightTuple.getBlocked(); leftTuple != null; ) {
// Capture the successor before clearBlocker() unlinks this node from the chain.
LeftTuple temp = leftTuple.getBlockedNext();
leftTuple.clearBlocker();
if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
// ignore, as it will get processed via left iteration. Children cannot be processed twice
leftTuple = temp;
continue;
}
// Prime the constraint context with this left tuple so candidate right
// tuples can be checked with isAllowedCachedLeft below.
constraints.updateFromTuple(contextEntry,
wm,
leftTuple);
if (useComparisonIndex) {
// Comparison-indexed memory: restart the candidate scan from the first
// right tuple relevant to this specific left tuple.
rootBlocker = rtm.getFirst(leftTuple, null, it);
}
// we know that older tuples have been checked so continue next
for (RightTuple newBlocker = rootBlocker; newBlocker != null; newBlocker = (RightTuple) it.next(newBlocker)) {
if (constraints.isAllowedCachedLeft(contextEntry,
newBlocker.getFactHandle())) {
// Found a replacement blocker: link both directions and stop searching.
leftTuple.setBlocker(newBlocker);
newBlocker.addBlocked(leftTuple);
break;
}
}
if (leftTuple.getBlocker() == null) {
// was previous blocked and not in memory, so add
ltm.add(leftTuple);
// No remaining blocker: stage an insert of a new child tuple, reusing the
// deleted right tuple's propagation context.
trgLeftTuples.addInsert(sink.createLeftTuple(leftTuple,
sink,
rightTuple.getPropagationContext(), true));
}