public void updateSink(final ObjectSink sink,
                       final PropagationContext context,
                       final InternalWorkingMemory workingMemory) {
    BetaNode betaNode = (BetaNode) this.sink.getSinks()[0];

    Memory betaMemory = workingMemory.getNodeMemory(betaNode);
    BetaMemory bm;
    if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
        bm = ((AccumulateMemory) betaMemory).getBetaMemory();
    } else {
        bm = (BetaMemory) betaMemory;
    }

    // iterate over the right tuple memory and propagate the result handles to the new sink
    bm.getRightTupleMemory().iterator();
    if (bm.getRightTupleMemory().size() > 0) {
        final org.drools.core.util.Iterator it = bm.getRightTupleMemory().iterator();
        for (RightTuple entry = (RightTuple) it.next(); entry != null; entry = (RightTuple) it.next()) {
            LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
            InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
            sink.assertObject(handle, context, workingMemory);
        }
    }
}
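// The AccumulateNode-vs-plain-BetaNode memory unwrapping above recurs throughout these
// methods. A minimal sketch of a shared helper (hypothetical, not an existing Drools
// method), built only from the calls already used above:
private static BetaMemory unwrapBetaMemory(BetaNode betaNode, Memory memory) {
    // an AccumulateNode wraps its BetaMemory inside an AccumulateMemory;
    // every other beta node stores the BetaMemory directly
    return betaNode.getType() == NodeTypeEnums.AccumulateNode
            ? ((AccumulateMemory) memory).getBetaMemory()
            : (BetaMemory) memory;
}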
public static void findLeftTupleBlocker(BetaNode betaNode,
                                        TupleMemory rtm,
                                        ContextEntry[] contextEntry,
                                        BetaConstraints constraints,
                                        LeftTuple leftTuple,
                                        FastIterator it,
                                        boolean useLeftMemory) {
    // This method will also remove rightTuples that come from a subnetwork when no left memory is used
    for (RightTuple rightTuple = betaNode.getFirstRightTuple(leftTuple, rtm, null, it);
         rightTuple != null; ) {
        RightTuple nextRight = (RightTuple) it.next(rightTuple);
        if (constraints.isAllowedCachedLeft(contextEntry, rightTuple.getFactHandle())) {
            leftTuple.setBlocker(rightTuple);

            if (useLeftMemory) {
                rightTuple.addBlocked(leftTuple);
                break;
            } else if (betaNode.isRightInputIsRiaNode()) {
                // If we aren't using left memory and the right input is a RIAN, we must keep
                // iterating, find all subnetwork right tuples and remove them,
                // so we don't break out of the loop
                rtm.remove(rightTuple);
            } else {
                break;
            }
        }
        rightTuple = nextRight;
    }
}
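// A sketch of a plausible call site for findLeftTupleBlocker (an assumption, simplified
// from how an existential node might use it): after the call, the left tuple's blocker
// tells the caller whether any constraint-compatible right tuple exists.
findLeftTupleBlocker(betaNode, rtm, contextEntry, constraints, leftTuple, it, useLeftMemory);
if (leftTuple.getBlocker() == null) {
    // no right tuple satisfied the constraints: for a 'not' node the left tuple
    // may propagate; for an 'exists' node it may not
}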
private static ProtobufMessages.NodeMemory writeRIANodeMemory(final int nodeId,
                                                              final MarshallerWriteContext context,
                                                              final BaseNode node,
                                                              final NodeMemories memories,
                                                              final Memory memory) {
    RightInputAdapterNode riaNode = (RightInputAdapterNode) node;

    ObjectSink[] sinks = riaNode.getSinkPropagator().getSinks();
    BetaNode betaNode = (BetaNode) sinks[0];

    Memory betaMemory = memories.peekNodeMemory(betaNode.getId());
    if (betaMemory == null) {
        return null;
    }

    BetaMemory bm;
    if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
        bm = ((AccumulateMemory) betaMemory).getBetaMemory();
    } else {
        bm = (BetaMemory) betaMemory;
    }

    // for RIA nodes, we need to store the ID of the created handles
    bm.getRightTupleMemory().iterator();
    if (bm.getRightTupleMemory().size() > 0) {
        ProtobufMessages.NodeMemory.RIANodeMemory.Builder _ria =
                ProtobufMessages.NodeMemory.RIANodeMemory.newBuilder();

        final org.drools.core.util.Iterator it = bm.getRightTupleMemory().iterator();
        // iterate over all propagated handles and store each tuple/handle pair
        for (RightTuple entry = (RightTuple) it.next(); entry != null; entry = (RightTuple) it.next()) {
            LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
            InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
            FactHandle _handle = ProtobufMessages.FactHandle.newBuilder()
                    .setId(handle.getId())
                    .setRecency(handle.getRecency())
                    .build();
            _ria.addContext(ProtobufMessages.NodeMemory.RIANodeMemory.RIAContext.newBuilder()
                    .setTuple(PersisterHelper.createTuple(leftTuple))
                    .setResultHandle(_handle)
                    .build());
        }

        return ProtobufMessages.NodeMemory.newBuilder()
                .setNodeId(nodeId)
                .setNodeType(ProtobufMessages.NodeMemory.NodeType.RIA)
                .setRia(_ria.build())
                .build();
    }
    return null;
}
private void doRiaNode(InternalWorkingMemory wm,
                       LeftInputAdapterNode liaNode,
                       PathMemory pmem,
                       TupleSets<LeftTuple> srcTuples,
                       BetaNode betaNode,
                       LeftTupleSinkNode sink,
                       SegmentMemory[] smems,
                       int smemIndex,
                       Memory nodeMem,
                       BetaMemory bm,
                       LinkedList<StackEntry> stack,
                       RuleExecutor executor) {
    RiaPathMemory pathMem = bm.getRiaRuleMemory();
    SegmentMemory[] subnetworkSmems = pathMem.getSegmentMemories();
    SegmentMemory subSmem = null;
    for (int i = 0; subSmem == null; i++) {
        // segment positions outside of the subnetwork, in the parent chain, are null,
        // so we must iterate to find the first non-null segment memory
        subSmem = subnetworkSmems[i];
    }

    // Resume the node after the riaNode segment has been processed and the right input memory populated
    StackEntry stackEntry = new StackEntry(liaNode, betaNode, bm.getNodePosMaskBit(), sink, pmem,
                                           nodeMem, smems, smemIndex, srcTuples, false, false);
    stack.add(stackEntry);
    if (log.isTraceEnabled()) {
        int offset = getOffset(betaNode);
        log.trace("{} RiaQueue {} {}", indent(offset), betaNode.toString(), srcTuples.toStringSizes());
    }

    TupleSets<LeftTuple> subLts = subSmem.getStagedLeftTuples().takeAll();
    // node is first in the segment, so bit is 1
    innerEval(liaNode, pathMem, subSmem.getRootNode(), 1, subSmem.getNodeMemories().getFirst(),
              subnetworkSmems, subSmem.getPos(), subLts, wm, stack, true, executor);
}
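// The first-non-null scan above appears in both doRiaNode variants. A sketch of it as an
// extracted helper (hypothetical), with an explicit guard instead of relying on the loop
// running past the end of the array:
private static SegmentMemory firstSubnetworkSegment(SegmentMemory[] subnetworkSmems) {
    for (SegmentMemory smem : subnetworkSmems) {
        if (smem != null) {
            // positions outside the subnetwork, in the parent chain, are null
            return smem;
        }
    }
    throw new IllegalStateException("subnetwork has no initialized segment memory");
}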
private void initPathMemories() {
    pathMemories = new PathMemory[3];
    NamedEntryPoint ep = (NamedEntryPoint) epManipulators[8].getEntryPoiny();
    InternalWorkingMemory wm = ep.getInternalWorkingMemory();
    ObjectTypeNode otn = ep.getEntryPointNode().getObjectTypeNodes().values().iterator().next();
    AlphaNode alpha = (AlphaNode) otn.getSinkPropagator().getSinks()[0];
    ObjectSink[] sinks = alpha.getSinkPropagator().getSinks();
    for (int i = 0; i < sinks.length; i++) {
        BetaNode beta = (BetaNode) sinks[i];
        RuleTerminalNode rtn = (RuleTerminalNode) beta.getSinkPropagator().getSinks()[0];
        pathMemories[i] = (PathMemory) wm.getNodeMemory(rtn);
    }
}
@Override
protected void doVisit(NetworkNode node,
                       Stack<NetworkNode> nodeStack,
                       StatefulKnowledgeSessionInfo info) {
    RightInputAdapterNode an = (RightInputAdapterNode) node;
    DefaultNodeInfo ni = (DefaultNodeInfo) info.getNodeInfo(node);

    BetaNode betaNode = (BetaNode) an.getSinkPropagator().getSinks()[0];
    Memory childMemory = info.getSession().getNodeMemory(betaNode);
    BetaMemory bm;
    if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
        bm = ((AccumulateMemory) childMemory).getBetaMemory();
    } else {
        bm = (BetaMemory) childMemory;
    }
    ni.setMemoryEnabled(true);
    ni.setTupleMemorySize(bm.getRightTupleMemory().size());
    ni.setCreatedFactHandles(bm.getRightTupleMemory().size());
}
private boolean evalBetaNode(LeftInputAdapterNode liaNode,
                             PathMemory pmem,
                             NetworkNode node,
                             Memory nodeMem,
                             SegmentMemory[] smems,
                             int smemIndex,
                             TupleSets<LeftTuple> trgTuples,
                             InternalWorkingMemory wm,
                             LinkedList<StackEntry> stack,
                             boolean processRian,
                             RuleExecutor executor,
                             TupleSets<LeftTuple> srcTuples,
                             TupleSets<LeftTuple> stagedLeftTuples,
                             LeftTupleSinkNode sink) {
    BetaNode betaNode = (BetaNode) node;
    BetaMemory bm;
    AccumulateMemory am = null;
    if (NodeTypeEnums.AccumulateNode == node.getType()) {
        am = (AccumulateMemory) nodeMem;
        bm = am.getBetaMemory();
    } else {
        bm = (BetaMemory) nodeMem;
    }

    if (processRian && betaNode.isRightInputIsRiaNode()) {
        // if the subnetwork is nested in this segment, it will create srcTuples containing
        // peer LeftTuples, suitable for the node in the main path.
        doRiaNode(wm, liaNode, pmem, srcTuples, betaNode, sink, smems, smemIndex, nodeMem, bm,
                  stack, executor);
        // return here; doRiaNode queues the evaluation on the stack, which is necessary
        // to handle nested query nodes
        return true;
    }

    switchOnDoBetaNode(node, trgTuples, wm, srcTuples, stagedLeftTuples, sink, bm, am);
    return false;
}
public void removeMemory(InternalWorkingMemory workingMemory) {
    BetaNode betaNode = (BetaNode) this.sink.getSinks()[0];

    Memory betaMemory = workingMemory.getNodeMemory(betaNode);
    BetaMemory bm;
    if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
        bm = ((AccumulateMemory) betaMemory).getBetaMemory();
    } else {
        bm = (BetaMemory) betaMemory;
    }

    if (bm.getRightTupleMemory().size() > 0) {
        final Iterator it = bm.getRightTupleMemory().iterator();
        for (RightTuple entry = (RightTuple) it.next(); entry != null; entry = (RightTuple) it.next()) {
            LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
            leftTuple.unlinkFromLeftParent();
            leftTuple.unlinkFromRightParent();
        }
    }
    workingMemory.clearNodeMemory(this);
}
private void doRiaNode2(InternalWorkingMemory wm,
                        TupleSets<LeftTuple> srcTuples,
                        RightInputAdapterNode riaNode) {
    ObjectSink[] sinks = riaNode.getSinkPropagator().getSinks();

    BetaNode betaNode = (BetaNode) sinks[0];
    BetaMemory bm;
    Memory nodeMem = wm.getNodeMemory(betaNode);
    if (NodeTypeEnums.AccumulateNode == betaNode.getType()) {
        bm = ((AccumulateMemory) nodeMem).getBetaMemory();
    } else {
        bm = (BetaMemory) nodeMem;
    }

    // Build up the iteration array for the other sinks
    BetaNode[] bns = null;
    BetaMemory[] bms = null;
    int length = sinks.length;
    if (length > 1) {
        bns = new BetaNode[sinks.length - 1];
        bms = new BetaMemory[sinks.length - 1];
        for (int i = 1; i < length; i++) {
            bns[i - 1] = (BetaNode) sinks[i];
            Memory nodeMem2 = wm.getNodeMemory(bns[i - 1]);
            // use the peer node's own type when unwrapping its memory
            if (NodeTypeEnums.AccumulateNode == bns[i - 1].getType()) {
                bms[i - 1] = ((AccumulateMemory) nodeMem2).getBetaMemory();
            } else {
                bms[i - 1] = (BetaMemory) nodeMem2;
            }
        }
    }

    length--; // subtract one, as the first sink is not in the array

    for (LeftTuple leftTuple = srcTuples.getInsertFirst(); leftTuple != null; ) {
        LeftTuple next = leftTuple.getStagedNext();

        PropagationContext pctx = leftTuple.getPropagationContext();
        InternalFactHandle handle = riaNode.createFactHandle(leftTuple, pctx, wm);

        RightTuple rightTuple = new RightTupleImpl(handle, betaNode);
        leftTuple.setContextObject(handle);
        rightTuple.setPropagationContext(pctx);

        if (bm.getStagedRightTuples().isEmpty()) {
            bm.setNodeDirtyWithoutNotify();
        }
        bm.getStagedRightTuples().addInsert(rightTuple);

        if (bns != null) {
            // Add peered RightTuples; they are attached to the FactHandle
            for (int i = 0; i < length; i++) {
                rightTuple = new RightTupleImpl(handle, bns[i]);
                rightTuple.setPropagationContext(pctx);

                if (bms[i].getStagedRightTuples().isEmpty()) {
                    bms[i].setNodeDirtyWithoutNotify();
                }
                bms[i].getStagedRightTuples().addInsert(rightTuple);
            }
        }

        leftTuple.clearStaged();
        leftTuple = next;
    }

    for (LeftTuple leftTuple = srcTuples.getDeleteFirst(); leftTuple != null; ) {
        LeftTuple next = leftTuple.getStagedNext();

        InternalFactHandle handle = (InternalFactHandle) leftTuple.getContextObject();
        RightTuple rightTuple = handle.getFirstRightTuple();
        TupleSets<RightTuple> rightTuples = bm.getStagedRightTuples();
        if (rightTuples.isEmpty()) {
            bm.setNodeDirtyWithoutNotify();
        }
        rightTuples.addDelete(rightTuple);

        if (bns != null) {
            // Delete the peered RightTuples; they are chained off the FactHandle
            for (int i = 0; i < length; i++) {
                rightTuple = rightTuple.getHandleNext();
                rightTuples = bms[i].getStagedRightTuples();
                if (rightTuples.isEmpty()) {
                    bms[i].setNodeDirtyWithoutNotify();
                }
                rightTuples.addDelete(rightTuple);
            }
        }

        leftTuple.clearStaged();
        leftTuple = next;
    }

    for (LeftTuple leftTuple = srcTuples.getUpdateFirst(); leftTuple != null; ) {
        LeftTuple next = leftTuple.getStagedNext();

        InternalFactHandle handle = (InternalFactHandle) leftTuple.getContextObject();
        RightTuple rightTuple = handle.getFirstRightTuple();
        TupleSets<RightTuple> rightTuples = bm.getStagedRightTuples();
        if (rightTuples.isEmpty()) {
            bm.setNodeDirtyWithoutNotify();
        }
        rightTuples.addUpdate(rightTuple);

        if (bns != null) {
            // Update the peered RightTuples; they are chained off the FactHandle
            for (int i = 0; i < length; i++) {
                rightTuple = rightTuple.getHandleNext();
                rightTuples = bms[i].getStagedRightTuples();
                if (rightTuples.isEmpty()) {
                    bms[i].setNodeDirtyWithoutNotify();
                }
                rightTuples.addUpdate(rightTuple);
            }
        }

        leftTuple.clearStaged();
        leftTuple = next;
    }

    srcTuples.resetAll();
}
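// The insert/delete/update loops above all repeat one idiom: mark the node dirty when its
// staged set transitions from empty, then stage the tuple. A sketch of that idiom as a
// helper (hypothetical, not an existing Drools method):
private static TupleSets<RightTuple> stagedRightTuplesMarkingDirty(BetaMemory bm) {
    TupleSets<RightTuple> staged = bm.getStagedRightTuples();
    if (staged.isEmpty()) {
        // the first tuple staged on this memory makes the node dirty
        bm.setNodeDirtyWithoutNotify();
    }
    return staged;
}
// usage: stagedRightTuplesMarkingDirty(bm).addInsert(rightTuple);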
public static void doUpdatesExistentialReorderRightMemory(BetaMemory bm,
                                                          BetaNode betaNode,
                                                          TupleSets<RightTuple> srcRightTuples) {
    TupleMemory rtm = bm.getRightTupleMemory();

    boolean resumeFromCurrent = !(betaNode.isIndexedUnificationJoin() || rtm.getIndexType().isComparison());

    // remove all the staged rightTuples from the memory before re-adding them all;
    // this avoids splitting a bucket when an updated rightTuple hasn't been moved yet
    // and so is the first entry in the wrong bucket
    for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
        RightTuple next = rightTuple.getStagedNext();
        if (rightTuple.getMemory() != null) {
            rightTuple.setTempRightTupleMemory(rightTuple.getMemory());

            if (resumeFromCurrent) {
                if (rightTuple.getBlocked() != null) {
                    // look for a non-staged right tuple first forward ...
                    RightTuple tempRightTuple = (RightTuple) rightTuple.getNext();
                    while (tempRightTuple != null && tempRightTuple.getStagedType() != LeftTuple.NONE) {
                        // next cannot be an updated or deleted rightTuple
                        tempRightTuple = (RightTuple) tempRightTuple.getNext();
                    }

                    // ... and if we cannot find one, try backward
                    if (tempRightTuple == null) {
                        tempRightTuple = (RightTuple) rightTuple.getPrevious();
                        while (tempRightTuple != null && tempRightTuple.getStagedType() != LeftTuple.NONE) {
                            // previous cannot be an updated or deleted rightTuple
                            tempRightTuple = (RightTuple) tempRightTuple.getPrevious();
                        }
                    }

                    rightTuple.setTempNextRightTuple(tempRightTuple);
                }
            }

            rightTuple.setTempBlocked(rightTuple.getBlocked());
            rightTuple.setBlocked(null);
            rtm.remove(rightTuple);
        }
        rightTuple = next;
    }

    for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
        RightTuple next = rightTuple.getStagedNext();
        if (rightTuple.getTempRightTupleMemory() != null) {
            rtm.add(rightTuple);

            if (resumeFromCurrent) {
                RightTuple tempRightTuple = rightTuple.getTempNextRightTuple();
                if (rightTuple.getBlocked() != null && tempRightTuple == null
                        && rightTuple.getMemory() == rightTuple.getTempRightTupleMemory()) {
                    // the next RightTuple was null, but the current RightTuple was added back into
                    // the same bucket, so reset it as the root blocker so a re-match can be attempted
                    rightTuple.setTempNextRightTuple(rightTuple);
                }
            }

            for (LeftTuple childLeftTuple = rightTuple.getFirstChild(); childLeftTuple != null; ) {
                LeftTuple childNext = childLeftTuple.getRightParentNext();
                childLeftTuple.reAddLeft();
                childLeftTuple = childNext;
            }
        }
        rightTuple = next;
    }
}
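// The forward-then-backward scan above looks for the nearest sibling right tuple that is
// not staged. A sketch of it as a standalone helper (hypothetical), returning null when
// no such sibling exists in either direction:
private static RightTuple findNonStagedSibling(RightTuple rightTuple) {
    RightTuple sibling = (RightTuple) rightTuple.getNext();
    while (sibling != null && sibling.getStagedType() != LeftTuple.NONE) {
        sibling = (RightTuple) sibling.getNext(); // skip staged (updated/deleted) tuples
    }
    if (sibling == null) {
        sibling = (RightTuple) rightTuple.getPrevious();
        while (sibling != null && sibling.getStagedType() != LeftTuple.NONE) {
            sibling = (RightTuple) sibling.getPrevious();
        }
    }
    return sibling;
}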
private void doRiaNode(InternalWorkingMemory wm,
                       LeftInputAdapterNode liaNode,
                       PathMemory rmem,
                       LeftTupleSets srcTuples,
                       BetaNode betaNode,
                       LeftTupleSinkNode sink,
                       SegmentMemory[] smems,
                       int smemIndex,
                       Memory nodeMem,
                       BetaMemory bm,
                       LinkedList<StackEntry> stack,
                       Set<String> visitedRules,
                       RuleExecutor executor) {
    RiaPathMemory pathMem = bm.getRiaRuleMemory();
    SegmentMemory[] subnetworkSmems = pathMem.getSegmentMemories();
    SegmentMemory subSmem = null;
    for (int i = 0; subSmem == null; i++) {
        // segment positions outside of the subnetwork, in the parent chain, are null,
        // so we must iterate to find the first non-null segment memory
        subSmem = subnetworkSmems[i];
    }

    // if (betaNode.getLeftTupleSource().getSinkPropagator().size() == 2) {
    //     // sub network is not part of share split, so need to handle propagation
    //     // this ensures the first LeftTuple is actually the subnetwork node
    //     // and the main outer network now receives the peer, notice the swap at the end "srcTuples == peerTuples"
    //     LeftTupleSets peerTuples = new LeftTupleSets();
    //     SegmentPropagator.processPeers(srcTuples, peerTuples, betaNode);
    //     // Make sure subnetwork Segment has tuples to process
    //     LeftTupleSets subnetworkStaged = subSmem.getStagedLeftTuples();
    //     subnetworkStaged.addAll(srcTuples);
    //
    //     srcTuples.resetAll();
    //
    //     srcTuples = peerTuples;
    // }

    // Resume the node after the riaNode segment has been processed and the right input memory populated
    StackEntry stackEntry = new StackEntry(liaNode, betaNode, sink, rmem, nodeMem, smems,
                                           smemIndex, srcTuples, visitedRules, false);
    stack.add(stackEntry);
    if (log.isTraceEnabled()) {
        int offset = getOffset(betaNode);
        log.trace("{} RiaQueue {} {}", indent(offset), betaNode.toString(), srcTuples.toStringSizes());
    }

    // RightInputAdapterNode riaNode = (RightInputAdapterNode) betaNode.getRightInput();
    // RiaNodeMemory riaNodeMemory = (RiaNodeMemory) wm.getNodeMemory((MemoryFactory) betaNode.getRightInput());
    // LeftTupleSets riaStagedTuples =
    eval2(liaNode, pathMem, (LeftTupleSink) subSmem.getRootNode(),
          subSmem.getNodeMemories().getFirst(), subnetworkSmems, subSmem.getPos(),
          subSmem.getStagedLeftTuples(), wm, stack, visitedRules, true, executor);
}
public void eval2(LeftInputAdapterNode liaNode,
                  PathMemory rmem,
                  NetworkNode node,
                  Memory nodeMem,
                  SegmentMemory[] smems,
                  int smemIndex,
                  LeftTupleSets trgTuples,
                  InternalWorkingMemory wm,
                  LinkedList<StackEntry> stack,
                  Set<String> visitedRules,
                  boolean processRian,
                  RuleExecutor executor) {
    LeftTupleSets srcTuples;
    SegmentMemory smem = smems[smemIndex];
    while (true) {
        srcTuples = trgTuples; // the previous target is now the source
        if (log.isTraceEnabled()) {
            int offset = getOffset(node);
            log.trace("{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
        }

        if (NodeTypeEnums.isTerminalNode(node)) {
            TerminalNode rtn = (TerminalNode) node;
            if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
                pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
            } else {
                pRtNode.doNode(rtn, wm, srcTuples, executor);
            }
            return;
        } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
            doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
            return;
        }

        LeftTupleSets stagedLeftTuples;
        if (node == smem.getTipNode() && smem.getFirst() != null) {
            // we are about to process the segment tip, allow it to merge insert/update/delete clashes
            // Can happen if the next segments have not yet been initialized
            stagedLeftTuples = smem.getFirst().getStagedLeftTuples();
        } else {
            stagedLeftTuples = null;
        }

        LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();

        trgTuples = new LeftTupleSets();

        if (NodeTypeEnums.isBetaNode(node)) {
            BetaNode betaNode = (BetaNode) node;

            BetaMemory bm = null;
            AccumulateMemory am = null;
            if (NodeTypeEnums.AccumulateNode == node.getType()) {
                am = (AccumulateMemory) nodeMem;
                bm = am.getBetaMemory();
            } else {
                bm = (BetaMemory) nodeMem;
            }

            if (processRian && betaNode.isRightInputIsRiaNode()) {
                // if the subnetwork is nested in this segment, it will create srcTuples containing
                // peer LeftTuples, suitable for the node in the main path.
                doRiaNode(wm, liaNode, rmem, srcTuples, betaNode, sink, smems, smemIndex, nodeMem,
                          bm, stack, visitedRules, executor);
                // return here; doRiaNode queues the evaluation on the stack, which is necessary
                // to handle nested query nodes
                return;
            }

            if (!bm.getDequeu().isEmpty()) {
                // If there are no staged RightTuples, then process the deque, popping entries
                // until the next insert/expiration clash
                RightTupleSets rightTuples = bm.getStagedRightTuples();
                if (rightTuples.isEmpty()) {
                    // nothing staged, so now process the deque
                    Deque<RightTuple> que = bm.getDequeu();
                    while (!que.isEmpty()) {
                        RightTuple rightTuple = que.peekFirst();
                        if (rightTuple.getPropagationContext().getType() == PropagationContext.EXPIRATION
                                // Cannot pop an expired fact if the insert/update has not yet been evaluated.
                                && rightTuple.getStagedType() != LeftTuple.NONE) {
                            break;
                        }

                        switch (rightTuple.getPropagationContext().getType()) {
                            case PropagationContext.INSERTION:
                            case PropagationContext.RULE_ADDITION:
                                rightTuples.addInsert(rightTuple);
                                break;
                            case PropagationContext.MODIFICATION:
                                rightTuples.addUpdate(rightTuple);
                                break;
                            case PropagationContext.DELETION:
                            case PropagationContext.EXPIRATION:
                            case PropagationContext.RULE_REMOVAL:
                                rightTuples.addDelete(rightTuple);
                                break;
                        }
                        que.removeFirst();
                    }
                }

                if (!bm.getDequeu().isEmpty()) {
                    // The deque is not empty, add a StackEntry for reprocessing.
                    StackEntry stackEntry = new StackEntry(liaNode, node, sink, rmem, nodeMem, smems,
                                                           smemIndex, trgTuples, visitedRules, false);
                    stack.add(stackEntry);
                }
            }

            switch (node.getType()) {
                case NodeTypeEnums.JoinNode: {
                    pJoinNode.doNode((JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.NotNode: {
                    pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.ExistsNode: {
                    pExistsNode.doNode((ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.AccumulateNode: {
                    pAccNode.doNode((AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
            }
        } else {
            switch (node.getType()) {
                case NodeTypeEnums.EvalConditionNode: {
                    pEvalNode.doNode((EvalConditionNode) node, (EvalMemory) nodeMem, sink, wm,
                                     srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.FromNode: {
                    pFromNode.doNode((FromNode) node, (FromMemory) nodeMem, sink, wm,
                                     srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.QueryElementNode: {
                    QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;

                    if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
                        // no point in evaluating the query element, and setting up the stack,
                        // if there is nothing to process
                        break;
                    }

                    QueryElementNode qnode = (QueryElementNode) node;
                    if (visitedRules == Collections.<String>emptySet()) {
                        visitedRules = new HashSet<String>();
                    }
                    visitedRules.add(qnode.getQueryElement().getQueryName());

                    // result tuples can happen when reactivity occurs inside of the query, prior to
                    // evaluation; we will need special behaviour to add the results again when this
                    // query result resumes
                    trgTuples.addAll(qmem.getResultLeftTuples());

                    if (!srcTuples.isEmpty()) {
                        // only process the Query Node if there are src tuples
                        StackEntry stackEntry = new StackEntry(liaNode, node, sink, rmem, nodeMem, smems,
                                                               smemIndex, trgTuples, visitedRules, true);

                        stack.add(stackEntry);

                        pQueryNode.doNode(qnode, (QueryElementNodeMemory) nodeMem, stackEntry, sink, wm, srcTuples);

                        SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
                        List<PathMemory> qrmems = qsmem.getPathMemories();

                        // Build the evaluation information for each 'or' branch.
                        // Except for the last: place each entry on the stack; evaluate the last one now.
                        for (int i = qrmems.size() - 1; i >= 0; i--) {
                            PathMemory qrmem = qrmems.get(i);

                            rmem = qrmem;
                            smems = qrmem.getSegmentMemories();
                            smemIndex = 0;
                            smem = smems[smemIndex]; // 0
                            liaNode = (LeftInputAdapterNode) smem.getRootNode();

                            if (liaNode == smem.getTipNode()) {
                                // the segment only has the liaNode in it;
                                // nothing is staged in the liaNode, so skip to the next segment
                                smem = smems[++smemIndex]; // 1
                                node = smem.getRootNode();
                                nodeMem = smem.getNodeMemories().getFirst();
                            } else {
                                // lia is in a shared segment, so point to the next node
                                node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
                                nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
                            }

                            trgTuples = smem.getStagedLeftTuples();

                            if (i != 0 && !trgTuples.isEmpty()) {
                                // All entries except the last should be placed on the stack for later evaluation.
                                stackEntry = new StackEntry(liaNode, node, null, rmem, nodeMem, smems,
                                                            smemIndex, trgTuples, visitedRules, false);
                                if (log.isTraceEnabled()) {
                                    int offset = getOffset(stackEntry.getNode());
                                    log.trace("{} ORQueue branch={} {} {}", indent(offset), i,
                                              stackEntry.getNode().toString(), trgTuples.toStringSizes());
                                }
                                stack.add(stackEntry);
                            }
                        }
                        processRian = true; // make sure it's reset, so ria nodes are processed
                        continue;
                    }
                    break;
                }
                case NodeTypeEnums.ConditionalBranchNode: {
                    pBranchNode.doNode((ConditionalBranchNode) node, (ConditionalBranchMemory) nodeMem,
                                       sink, wm, srcTuples, trgTuples, stagedLeftTuples, executor);
                    break;
                }
            }
        }

        if (node != smem.getTipNode()) {
            // get the next node and node memory in the segment
            node = sink;
            nodeMem = nodeMem.getNext();
        } else {
            // Reached the end of the segment, start on a new segment.
            SegmentPropagator.propagate(smem, trgTuples, wm);
            smem = smems[++smemIndex];
            trgTuples = smem.getStagedLeftTuples();
            if (log.isTraceEnabled()) {
                log.trace("Segment {}", smemIndex);
            }
            node = (LeftTupleSink) smem.getRootNode();
            nodeMem = smem.getNodeMemories().getFirst();
        }
        processRian = true; // make sure it's reset, so ria nodes are processed
    }
}
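// When eval2 drains the deque above, each entry's propagation type decides which staged
// set it joins. A sketch of that mapping in isolation (hypothetical helper; RightTupleSets
// as used in eval2):
private static void stageByPropagationType(RightTupleSets staged, RightTuple rightTuple) {
    switch (rightTuple.getPropagationContext().getType()) {
        case PropagationContext.INSERTION:
        case PropagationContext.RULE_ADDITION:
            staged.addInsert(rightTuple);
            break;
        case PropagationContext.MODIFICATION:
            staged.addUpdate(rightTuple);
            break;
        case PropagationContext.DELETION:
        case PropagationContext.EXPIRATION:
        case PropagationContext.RULE_REMOVAL:
            staged.addDelete(rightTuple);
            break;
    }
}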