LinkedList<StackEntry> stack,
LinkedList<StackEntry> outerStack,
Set<String> visitedRules,
boolean processRian,
RuleExecutor executor) {
// NOTE(review): this is the tail of a larger method — its name and leading
// parameters are above this chunk. It implements the inner evaluation loop of
// the rule network: walk the segments of the current rule path (smems), skip
// segments/nodes that have no staged tuples and no dirty bits, dispatch each
// node by type, and propagate resulting tuples into the next segment until a
// terminal node is reached or evaluation must be suspended onto the stack.
LeftTupleSets srcTuples;
SegmentMemory smem = smems[smemIndex];
// Tuples staged at the CURRENT node's sink; null until the first non-terminal
// node is processed. Restored to its segment at the bottom if evaluation
// breaks out of the loop mid-segment (see the final synchronized block).
LeftTupleSets stagedLeftTuples = null;
while (true) {
srcTuples = trgTuples; // previous target, is now the source
if (log.isTraceEnabled()) {
int offset = getOffset(node);
log.trace("{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
}
boolean emptySrcTuples = srcTuples.isEmpty();
// Subnetwork (RIA) inputs can never be skipped, even with no staged tuples:
// the dirty state may live several segment levels deep inside the subnetwork.
if ( !(NodeTypeEnums.isBetaNode(node) && ((BetaNode)node).isRightInputIsRiaNode() ) ) {
// The engine cannot skip a ria node, as the dirty might be several levels deep
if ( emptySrcTuples && smem.getDirtyNodeMask() == 0) {
// empty sources and segment is not dirty, skip to non empty src tuples or dirty segment.
boolean foundDirty = false;
// Scan forward through the remaining segments for one that has staged
// tuples, a dirty node, or a subnetwork input (which must be entered).
for ( int i = ++smemIndex, length = smems.length; i < length; i++ ) {
if (log.isTraceEnabled()) {
int offset = getOffset(node);
log.trace("{} Skip Segment {}", indent(offset), i-1);
}
// this is needed for subnetworks that feed into a parent network that has no right inputs,
// and may not yet be initialized
if ( smem.isEmpty() && !NodeTypeEnums.isTerminalNode(smem.getTipNode()) ) {
SegmentUtilities.createChildSegments( wm, smem, ((LeftTupleSource)smem.getTipNode()).getSinkPropagator() );
}
// Position on the root of the candidate segment; 'bit' is the
// per-node dirty mask position, reset to the first node.
smem = smems[i];
bit = 1;
srcTuples = smem.getStagedLeftTuples().takeAll();
emptySrcTuples = srcTuples.isEmpty();
node = smem.getRootNode();
nodeMem = smem.getNodeMemories().getFirst();
if ( !emptySrcTuples ||
smem.getDirtyNodeMask() != 0 ||
(NodeTypeEnums.isBetaNode(node) && ((BetaNode)node).isRightInputIsRiaNode() )) {
// break if dirty or if we reach a subnetwork. It must break for subnetworks, so they can be searched.
foundDirty = true;
smemIndex = i;
break;
}
}
// Nothing left to evaluate on this path: exit the outer loop.
if (!foundDirty) {
break;
}
}
if (log.isTraceEnabled()) {
int offset = getOffset(node);
log.trace("{} Segment {}", indent(offset), smemIndex);
log.trace("{} {} {} {}", indent(offset), cycle, node.toString(), srcTuples.toStringSizes());
}
}
long dirtyMask = smem.getDirtyNodeMask();
if ( emptySrcTuples ) {
// No staged tuples: fast-forward within the segment past nodes whose dirty
// bit is clear. Stops at the segment tip or at a subnetwork input, which
// must always be evaluated (same reasoning as above).
while ((dirtyMask & bit) == 0 && node != smem.getTipNode() && !(NodeTypeEnums.isBetaNode(node) && ((BetaNode)node).isRightInputIsRiaNode() ) ) {
if (log.isTraceEnabled()) {
int offset = getOffset(node);
log.trace("{} Skip Node {}", indent(offset), node);
}
bit = bit << 1; // shift to check the next node
node = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();
nodeMem = nodeMem.getNext();
}
}
// Terminal nodes end the path: fire the (query) terminal and stop this loop.
if (NodeTypeEnums.isTerminalNode(node)) {
TerminalNode rtn = ( TerminalNode ) node;
if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
pQtNode.doNode((QueryTerminalNode) rtn,
wm,
srcTuples,
stack);
} else {
pRtNode.doNode(rtn,
wm,
srcTuples,
executor);
}
break;
} else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
// End of a subnetwork: feed results back to the parent network.
// (The constant's spelling "Adater" is as declared in NodeTypeEnums.)
doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
break;
}
// Inner node: stage tuples at its sink, evaluate, and collect new targets.
stagedLeftTuples = getTargetStagedLeftTuples(node, wm, smem);
LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();
trgTuples = new LeftTupleSetsImpl();
if (NodeTypeEnums.isBetaNode(node)) {
boolean exitInnerEval = evalBetaNode(liaNode, pmem, node, nodeMem, smems, smemIndex, trgTuples, wm, stack, outerStack, visitedRules, processRian, executor, srcTuples, stagedLeftTuples, sink);
if ( exitInnerEval ) {
break; // RiaNode exists and has placed StackEntry on the Stack
}
} else {
boolean exitInnerEval = false;
// Dispatch the remaining non-beta node types to their evaluators.
switch (node.getType()) {
case NodeTypeEnums.EvalConditionNode: {
pEvalNode.doNode((EvalConditionNode) node, (EvalMemory) nodeMem, sink,
wm, srcTuples, trgTuples, stagedLeftTuples);
break;
}
case NodeTypeEnums.FromNode: {
pFromNode.doNode((FromNode) node, (FromMemory) nodeMem, sink,
wm, srcTuples, trgTuples, stagedLeftTuples);
break;
}
case NodeTypeEnums.QueryElementNode: {
exitInnerEval = evalQueryNode(liaNode, pmem, node, bit, nodeMem, smems, smemIndex, trgTuples, wm, stack, visitedRules, srcTuples, sink);
break;
}
case NodeTypeEnums.TimerConditionNode: {
pTimerNode.doNode( (TimerNode) node, (TimerNodeMemory) nodeMem, pmem, sink, wm, srcTuples, trgTuples, stagedLeftTuples);
break;
}
case NodeTypeEnums.ConditionalBranchNode: {
pBranchNode.doNode((ConditionalBranchNode) node, (ConditionalBranchMemory) nodeMem, sink,
wm, srcTuples, trgTuples, stagedLeftTuples, executor);
break;
}
}
if ( exitInnerEval && trgTuples.isEmpty() ) {
break; // A query exists and has placed a StackEntry, and there are no current trgTuples to process
}
}
if (node != smem.getTipNode()) {
// get next node and node memory in the segment
node = sink;
nodeMem = nodeMem.getNext();
bit = bit << 1;
} else {
// Reached end of segment, start on new segment.
// Lock on this segment's staging area while handing tuples across the
// segment boundary, so concurrent insertions stay consistent.
synchronized ( smem.getFirst().getStagedLeftTuples() ) {
smem.getFirst().getStagedLeftTuples().addAll( stagedLeftTuples ); // must put back all the LTs
// end of SegmentMemory, so we know that stagedLeftTuples is not null
SegmentPropagator.propagate(smem,
trgTuples,
wm);
bit = 1;
smem = smems[++smemIndex];
trgTuples = smem.getStagedLeftTuples().takeAll();
}
if (log.isTraceEnabled()) {
int offset = getOffset(node);
log.trace("{} Segment {}", indent(offset), smemIndex);
}
node = smem.getRootNode();
nodeMem = smem.getNodeMemories().getFirst();
}
processRian = true; // make sure it's reset, so ria nodes are processed
}
if ( stagedLeftTuples != null && !stagedLeftTuples.isEmpty() ) {
// must restore the StagedLeftTuples to the segment they were removed from,
// since evaluation broke out before reaching the end of the segment
synchronized ( smem.getFirst().getStagedLeftTuples() ) {
smem.getFirst().getStagedLeftTuples().addAll( stagedLeftTuples ); // must put back all the LTs
}
}