LeftTupleSets trgLeftTuples,
LeftTupleSets stagedLeftTuples) {
LeftTupleMemory ltm = bm.getLeftTupleMemory();
RightTupleMemory rtm = bm.getRightTupleMemory();
ContextEntry[] contextEntry = bm.getContext();
BetaConstraints constraints = existsNode.getRawConstraints();
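// For each staged right update we make two passes: first re-check the unblocked LeftTuples
// currently held in left memory against the updated rightTuple, then re-evaluate the
// LeftTuples this rightTuple was blocking before the update.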
for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
RightTuple next = rightTuple.getStagedNext();
FastIterator leftIt = existsNode.getLeftIterator(ltm);
PropagationContext context = rightTuple.getPropagationContext();
LeftTuple firstLeftTuple = existsNode.getFirstLeftTuple(rightTuple, ltm, context, leftIt);
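// firstLeftTuple is the first unblocked LeftTuple in left memory that this updated rightTuple can be matched against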
// make sure this rightTuple's fact handle is the one cached in the constraint context,
// so that isAllowedCachedRight below tests against the updated values
constraints.updateFromFactHandle(contextEntry,
wm,
rightTuple.getFactHandle());
LeftTuple firstBlocked = rightTuple.getBlocked();
// we now have a reference to the first blocked LeftTuple, so null it on the rightTuple itself, so the blocked list can be rebuilt
rightTuple.nullBlocked();
// first process the non-blocked LeftTuples, as we know only those are in the left memory.
for (LeftTuple leftTuple = firstLeftTuple; leftTuple != null; ) {
// preserve next now, in case we remove this leftTuple
LeftTuple temp = (LeftTuple) leftIt.next(leftTuple);
if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
// ignore, as it will get processed via left iteration. Children cannot be processed twice
leftTuple = temp;
continue;
}
// we know that only unblocked LeftTuples are still in the memory
if (constraints.isAllowedCachedRight(contextEntry,
leftTuple)) {
leftTuple.setBlocker(rightTuple);
rightTuple.addBlocked(leftTuple);
// this is now blocked so remove from memory
ltm.remove(leftTuple);
// subclasses like ForallNotNode might override this propagation
trgLeftTuples.addInsert(sink.createLeftTuple(leftTuple,
sink,
rightTuple.getPropagationContext(), true));
}
leftTuple = temp;
}
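// second pass: LeftTuples that were blocked by this rightTuple may no longer match it after
// the update, so each one must search for a new blocker among the RightTuples still in memory
// (including this rightTuple itself, which is re-added at the end of its bucket below).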
if (firstBlocked != null) {
FastIterator rightIt = existsNode.getRightIterator(rtm);
boolean useComparisonIndex = rtm.getIndexType().isComparison();
// now process the existing blocked LeftTuples; only pre-existing blocks are handled here, not the ones newly added by the loop above
RightTuple rootBlocker = useComparisonIndex ? null : (RightTuple) rightIt.next(rightTuple);
RightTupleList list = rightTuple.getMemory();
// we must do this only after the next tuple in memory (rootBlocker above) has been retrieved;
// re-adding at the end of the bucket gives this rightTuple a chance to rematch tuples in the same bucket
rtm.removeAdd(rightTuple);
if (!useComparisonIndex && rootBlocker == null && list == rightTuple.getMemory()) {
// we are at the end of the list but still in the same bucket, so point rootBlocker back at this rightTuple to give it a chance to rematch
rootBlocker = rightTuple;
}
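// rootBlocker is the first candidate RightTuple that the blocked LeftTuples will be retested
// against; when a comparison index is used it is resolved per LeftTuple inside the loop below.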
// iterate all the previously blocked LeftTuples
for (LeftTuple leftTuple = firstBlocked; leftTuple != null; ) {
LeftTuple temp = leftTuple.getBlockedNext();
leftTuple.clearBlocker(); // must null these as we are re-adding them to the list
if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
// ignore, as it will get processed via left iteration. Children cannot be processed twice,
// but it must first be added back into the blocked list
leftTuple.setBlocker(rightTuple);
rightTuple.addBlocked(leftTuple);
leftTuple = temp;
continue;
}
constraints.updateFromTuple(contextEntry,
wm,
leftTuple);
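// with a comparison index the candidate RightTuples depend on the LeftTuple's own values,
// so iteration must restart from the first RightTuple that matches this LeftTuple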
if (useComparisonIndex) {
rootBlocker = existsNode.getFirstRightTuple(leftTuple, rtm, null, rightIt);
}
// older RightTuples have already been checked, so we continue from the next one onwards
for (RightTuple newBlocker = rootBlocker; newBlocker != null; newBlocker = (RightTuple) rightIt.next(newBlocker)) {
if (constraints.isAllowedCachedLeft(contextEntry,
newBlocker.getFactHandle())) {
leftTuple.setBlocker(newBlocker);
newBlocker.addBlocked(leftTuple);
break;
}
}
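// for an exists node a blocker means the existential condition is still satisfied; with no
// blocker left, the previously propagated child must be retracted and the LeftTuple returned
// to left memory so that future right inserts can match it again.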
if (leftTuple.getBlocker() == null) {
// was previously blocked and therefore not in left memory, so add it back
ltm.add(leftTuple);
LeftTuple childLeftTuple = leftTuple.getFirstChild();
if (childLeftTuple != null) {
childLeftTuple.setPropagationContext(rightTuple.getPropagationContext());
childLeftTuple = deleteLeftChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
}
}
leftTuple = temp;
}
} else {
// this is done at the end rather than the beginning, because the 'if' branch above needs to retrieve the next tuple from memory before the removeAdd
rtm.removeAdd(rightTuple);
}
rightTuple.clearStaged();
rightTuple = next;
}
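// all staged right updates have been processed; clear any fact handle still cached in the constraint context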
constraints.resetFactHandle(contextEntry);
}