Example #1
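  // Entry point for evaluating a rule's network: locates the first segment with staged work
  // (skipping a segment that contains only the LeftInputAdapterNode), sets up visited-rule
  // tracking for query terminal nodes, and delegates to eval1.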
  public int evaluateNetwork(PathMemory pmem, InternalWorkingMemory wm, RuleExecutor executor) {
    SegmentMemory[] smems = pmem.getSegmentMemories();

    int smemIndex = 0;
    SegmentMemory smem = smems[smemIndex]; // 0
    LeftInputAdapterNode liaNode = (LeftInputAdapterNode) smem.getRootNode();

    NetworkNode node;
    Memory nodeMem;
    if (liaNode == smem.getTipNode()) {
      // segment only has liaNode in it
      // nothing is staged in the liaNode, so skip to next segment
      smem = smems[++smemIndex]; // 1
      node = smem.getRootNode();
      nodeMem = smem.getNodeMemories().getFirst();
    } else {
      // lia is in shared segment, so point to next node
      node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
      nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
    }

    LeftTupleSets srcTuples = smem.getStagedLeftTuples();

    if (log.isTraceEnabled()) {
      log.trace(
          "Rule[name={}] segments={} {}",
          ((TerminalNode) pmem.getNetworkNode()).getRule().getName(),
          smems.length,
          srcTuples.toStringSizes());
    }

    Set<String> visitedRules;
    if (((TerminalNode) pmem.getNetworkNode()).getType() == NodeTypeEnums.QueryTerminalNode) {
      visitedRules = new HashSet<String>();
    } else {
      visitedRules = Collections.<String>emptySet();
    }

    LinkedList<StackEntry> stack = new LinkedList<StackEntry>();
    eval1(
        liaNode,
        pmem,
        (LeftTupleSink) node,
        nodeMem,
        smems,
        smemIndex,
        srcTuples,
        wm,
        stack,
        visitedRules,
        true,
        executor);

    return 0;
  }
Example #2
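  // Re-runs network evaluation when the path memory is dirty or its stream queue has entries;
  // queued entries are drained one at a time, evaluating the network after each removal.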
  public synchronized void reEvaluateNetwork(
      InternalWorkingMemory wm, LinkedList<StackEntry> outerStack, boolean fireUntilHalt) {
    if (isDirty() || (pmem.getStreamQueue() != null && !pmem.getStreamQueue().isEmpty())) {
      setDirty(false);
      TupleEntryQueue queue =
          pmem.getStreamQueue() != null ? pmem.getStreamQueue().takeAllForFlushing() : null;

      if (queue == null || queue.isEmpty()) {
        NETWORK_EVALUATOR.evaluateNetwork(pmem, outerStack, this, wm);
      } else {
        while (!queue.isEmpty()) {
          removeQueuedTupleEntry(queue);
          NETWORK_EVALUATOR.evaluateNetwork(pmem, outerStack, this, wm);
        }
      }
    }
  }
Example #3
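  // Verifies segment re-splitting when rule r5 is added after a segment has already been created:
  // r1's path grows from 4 to 5 segments, the created segment moves from position 2 (mask 4)
  // to position 3 (mask 8), and r5's new path has no created segments yet.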
  @Test
  public void testSplitTwoBeforeCreatedSegment() throws Exception {
    KnowledgeBase kbase1 =
        buildKnowledgeBase(
            "r1", "   A(1;)  A(2;) B(1;) B(2;) C(1;) C(2;) D(1;) D(2;) E(1;) E(2;)\n");
    kbase1.addKnowledgePackages(
        buildKnowledgePackage(
            "r2", "   A(1;)  A(2;) B(1;) B(2;) C(1;) C(2;) D(1;) D(2;) E(1;) E(2;)\n"));
    kbase1.addKnowledgePackages(
        buildKnowledgePackage("r3", "   A(1;)  A(2;) B(1;) B(2;) C(1;) C(2;) D(1;) D(2;)\n"));
    kbase1.addKnowledgePackages(
        buildKnowledgePackage("r4", "   A(1;)  A(2;) B(1;) B(2;) C(1;) C(2;) \n"));

    InternalWorkingMemory wm = ((InternalWorkingMemory) kbase1.newStatefulKnowledgeSession());
    List list = new ArrayList();
    wm.setGlobal("list", list);

    wm.insert(new E(1));
    wm.insert(new E(2));
    wm.flushPropagations();

    RuleTerminalNode rtn1 = getRtn("org.kie.r1", kbase1);
    RuleTerminalNode rtn2 = getRtn("org.kie.r2", kbase1);
    RuleTerminalNode rtn3 = getRtn("org.kie.r3", kbase1);
    RuleTerminalNode rtn4 = getRtn("org.kie.r4", kbase1);

    PathMemory pm1 = (PathMemory) wm.getNodeMemory(rtn1);
    SegmentMemory[] smems = pm1.getSegmentMemories();
    assertEquals(4, smems.length);
    assertNull(smems[0]);
    assertNull(smems[1]);
    assertNull(smems[3]);
    SegmentMemory sm = smems[2];
    assertEquals(2, sm.getPos());
    assertEquals(4, sm.getSegmentPosMaskBit());
    assertEquals(4, pm1.getLinkedSegmentMask());

    kbase1.addKnowledgePackages(buildKnowledgePackage("r5", "   A(1;)  A(2;) B(1;) B(2;) \n"));

    smems = pm1.getSegmentMemories();
    assertEquals(5, smems.length);
    assertNull(smems[0]);
    assertNull(smems[1]);
    assertNull(smems[2]);

    sm = smems[3];
    assertEquals(3, sm.getPos());
    assertEquals(8, sm.getSegmentPosMaskBit());
    assertEquals(8, pm1.getLinkedSegmentMask());

    RuleTerminalNode rtn5 = getRtn("org.kie.r5", kbase1);
    PathMemory pm5 = (PathMemory) wm.getNodeMemory(rtn5);
    smems = pm5.getSegmentMemories();
    assertEquals(2, smems.length);
    assertNull(smems[0]);
    assertNull(smems[1]);
  }
Example #4
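  // Verifies that adding rule r2, which shares r1's entire join chain, stages exactly two insert
  // tuples on the shared segment, and that the next fireAllRules fires both rules for both matches.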
  @Test
  public void testPopulatedSharedToRtn() throws Exception {
    KnowledgeBase kbase1 = buildKnowledgeBase("r1", "   A() B() C() D() E()\n");
    InternalWorkingMemory wm = ((InternalWorkingMemory) kbase1.newStatefulKnowledgeSession());
    List list = new ArrayList();
    wm.setGlobal("list", list);

    wm.insert(new A(1));
    wm.insert(new A(2));
    wm.insert(new B(1));
    wm.insert(new C(1));
    wm.insert(new D(1));
    wm.insert(new E(1));

    wm.fireAllRules();
    assertEquals(2, list.size());

    kbase1.addKnowledgePackages(buildKnowledgePackage("r2", "   A() B() C() D() E()\n"));

    ObjectTypeNode eotn = getObjectTypeNode(kbase1, E.class);
    JoinNode eNode = (JoinNode) eotn.getSinkPropagator().getSinks()[0];
    RuleTerminalNode rtn = (RuleTerminalNode) eNode.getSinkPropagator().getLastLeftTupleSink();

    PathMemory pm = (PathMemory) wm.getNodeMemory(rtn);
    SegmentMemory sm = pm.getSegmentMemory();
    assertNotNull(sm.getStagedLeftTuples().getInsertFirst());
    assertNotNull(sm.getStagedLeftTuples().getInsertFirst().getStagedNext());
    assertNull(sm.getStagedLeftTuples().getInsertFirst().getStagedNext().getStagedNext());

    wm.fireAllRules();
    assertNull(sm.getStagedLeftTuples().getInsertFirst());
    assertEquals(4, list.size());

    System.out.println(list);

    assertEquals("r1", ((Match) list.get(0)).getRule().getName());
    assertEquals("r1", ((Match) list.get(1)).getRule().getName());
    assertEquals("r2", ((Match) list.get(2)).getRule().getName());
    assertEquals("r2", ((Match) list.get(3)).getRule().getName());
  }
Example #5
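  // Drains the path's stream queue; entries for which processStreamTupleEntry returns true
  // are collected as non-normalized deletes and returned to the caller.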
  private List<TupleEntry> flushStreamQueue() {
    TupleEntryQueue tupleQueue = pmem.getStreamQueue().takeAllForFlushing();
    if (tupleQueue == null || tupleQueue.isEmpty()) {
      return Collections.emptyList();
    }
    List<TupleEntry> nonNormalizedDeletes = new ArrayList<TupleEntry>();
    while (!tupleQueue.isEmpty()) {
      TupleEntry tupleEntry = tupleQueue.remove();
      if (processStreamTupleEntry(tupleQueue, tupleEntry)) {
        nonNormalizedDeletes.add(tupleEntry);
      }
    }
    return nonNormalizedDeletes;
  }
Example #6
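  // Variant of the evaluation entry point that also tracks the node's position bit within the
  // segment (1 when starting at a segment's root node, 2 when skipping the liaNode in a shared
  // segment) before delegating to outerEval.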
  public void evaluateNetwork(PathMemory pmem, RuleExecutor executor, InternalWorkingMemory wm) {
    SegmentMemory[] smems = pmem.getSegmentMemories();

    int smemIndex = 0;
    SegmentMemory smem = smems[smemIndex]; // 0
    LeftInputAdapterNode liaNode = (LeftInputAdapterNode) smem.getRootNode();

    LinkedList<StackEntry> stack = new LinkedList<StackEntry>();

    NetworkNode node;
    Memory nodeMem;
    long bit = 1;
    if (liaNode == smem.getTipNode()) {
      // segment only has liaNode in it
      // nothing is staged in the liaNode, so skip to next segment
      smem = smems[++smemIndex]; // 1
      node = smem.getRootNode();
      nodeMem = smem.getNodeMemories().getFirst();
    } else {
      // lia is in shared segment, so point to next node
      bit = 2;
      node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
      nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
    }

    TupleSets<LeftTuple> srcTuples = smem.getStagedLeftTuples();
    if (log.isTraceEnabled()) {
      log.trace(
          "Rule[name={}] segments={} {}",
          ((TerminalNode) pmem.getNetworkNode()).getRule().getName(),
          smems.length,
          srcTuples.toStringSizes());
    }
    outerEval(
        liaNode, pmem, node, bit, nodeMem, smems, smemIndex, srcTuples, wm, stack, true, executor);
  }
Example #7
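  // Fires the queued activations for this rule one at a time, re-evaluating the network between
  // firings, and exits early when a higher-salience rule reaches the agenda or the halt/limit
  // checks in haltRuleFiring apply.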
  private int fire(
      InternalWorkingMemory wm,
      AgendaFilter filter,
      int fireCount,
      int fireLimit,
      LinkedList<StackEntry> outerStack,
      InternalAgenda agenda,
      boolean fireUntilHalt) {
    int localFireCount = 0;
    if (!tupleList.isEmpty()) {
      RuleTerminalNode rtn = (RuleTerminalNode) pmem.getNetworkNode();

      if (!fireExitedEarly && isDeclarativeAgendaEnabled()) {
        // Network Evaluation can notify meta rules, which should be given a chance to fire first
        RuleAgendaItem nextRule = agenda.peekNextRule();
        if (!isHighestSalience(nextRule, ruleAgendaItem.getSalience())) {
          fireExitedEarly = true;
          return localFireCount;
        }
      }

      while (!tupleList.isEmpty()) {
        LeftTuple leftTuple;
        if (queue != null) {
          leftTuple = (LeftTuple) queue.dequeue();
          tupleList.remove(leftTuple);
        } else {
          leftTuple = tupleList.removeFirst();
          ((Activation) leftTuple).setQueued(false);
        }

        // branches result in multiple RTNs for a given rule, so unwrap per LeftTuple
        rtn = (RuleTerminalNode) leftTuple.getSink();
        RuleImpl rule = rtn.getRule();

        PropagationContext pctx = leftTuple.getPropagationContext();
        pctx = RuleTerminalNode.findMostRecentPropagationContext(leftTuple, pctx);

        // check whether the rule is not effective, or whether the current Rule is no-loop
        // and the origin rule is the same; if so, skip this tuple
        if (cancelAndContinue(wm, rtn, rule, leftTuple, pctx, filter)) {
          continue;
        }

        AgendaItem item = (AgendaItem) leftTuple;
        if (agenda.getActivationsFilter() != null
            && !agenda.getActivationsFilter().accept(item, wm, rtn)) {
          // only relevant for serialization, so as not to refire Matches already fired
          continue;
        }

        agenda.fireActivation(item);
        localFireCount++;

        if (rtn.getLeftTupleSource() == null) {
          break; // The activation firing removed this rule from the rule base
        }

        int salience =
            ruleAgendaItem.getSalience(); // dynamic salience may have updated it, so get it again.
        if (queue != null && !queue.isEmpty() && salience != queue.peek().getSalience()) {
          ruleAgendaItem.dequeue();
          ruleAgendaItem.setSalience(queue.peek().getSalience());
          ruleAgendaItem.getAgendaGroup().add(ruleAgendaItem);
          salience = ruleAgendaItem.getSalience();
        }

        RuleAgendaItem nextRule = agenda.peekNextRule();
        if (haltRuleFiring(nextRule, fireCount, fireLimit, localFireCount, agenda, salience)) {
          break; // another rule has high priority and is on the agenda, so evaluate it first
        }
        reEvaluateNetwork(wm, outerStack, fireUntilHalt);
        wm.executeQueuedActions();

        if (tupleList.isEmpty() && !outerStack.isEmpty()) {
          // the outer stack holds nodes that need evaluation once all rule firing is done,
          // such as window expirations, which must be processed serially
          StackEntry entry = outerStack.removeFirst();
          NETWORK_EVALUATOR.evalStackEntry(entry, outerStack, outerStack, this, wm);
        }
      }
    }

    removeRuleAgendaItemWhenEmpty(wm);

    fireExitedEarly = false;
    return localFireCount;
  }
Example #8
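  // Evaluates a QueryElementNode: previously produced result tuples are re-added to the target
  // set, and if source tuples exist a StackEntry is pushed for the current position plus one
  // per 'or' branch of the query's subnetwork.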
  private boolean evalQueryNode(
      LeftInputAdapterNode liaNode,
      PathMemory pmem,
      NetworkNode node,
      long bit,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      TupleSets<LeftTuple> trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      TupleSets<LeftTuple> srcTuples,
      LeftTupleSinkNode sink,
      TupleSets<LeftTuple> stagedLeftTuples) {
    QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;

    if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
      // no point in evaluating query element, and setting up stack, if there is nothing to process
      return false;
    }

    QueryElementNode qnode = (QueryElementNode) node;

    if (log.isTraceEnabled()) {
      int offset = getOffset(node);
      log.trace(
          "{} query result tuples {}", indent(offset), qmem.getResultLeftTuples().toStringSizes());
    }

    // result tuples can appear when reactivity occurs inside the query, prior to evaluation;
    // special behaviour is needed to add the results again when this query result resumes
    trgTuples.addAll(qmem.getResultLeftTuples());
    qmem.setNodeCleanWithoutNotify();

    if (!srcTuples.isEmpty()) {
      // only process the Query Node if there are src tuples
      StackEntry stackEntry =
          new StackEntry(
              liaNode, node, bit, sink, pmem, nodeMem, smems, smemIndex, trgTuples, true, true);

      stack.add(stackEntry);

      pQueryNode.doNode(
          qnode,
          (QueryElementNodeMemory) nodeMem,
          stackEntry,
          wm,
          srcTuples,
          trgTuples,
          stagedLeftTuples);

      SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
      List<PathMemory> qpmems = qsmem.getPathMemories();

      // Build the evaluation information for each 'or' branch
      for (int i = 0; i < qpmems.size(); i++) {
        PathMemory qpmem = qpmems.get(i);

        pmem = qpmem;
        smems = qpmem.getSegmentMemories();
        smemIndex = 0;
        SegmentMemory smem = smems[smemIndex]; // 0
        liaNode = (LeftInputAdapterNode) smem.getRootNode();

        if (liaNode == smem.getTipNode()) {
          // segment only has liaNode in it
          // nothing is staged in the liaNode, so skip to next segment
          smem = smems[++smemIndex]; // 1
          node = smem.getRootNode();
          nodeMem = smem.getNodeMemories().getFirst();
          bit = 1;
        } else {
          // lia is in shared segment, so point to next node
          node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
          nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
          bit = 2;
        }

        trgTuples = smem.getStagedLeftTuples().takeAll();
        stackEntry =
            new StackEntry(
                liaNode, node, bit, null, pmem, nodeMem, smems, smemIndex, trgTuples, false, true);
        if (log.isTraceEnabled()) {
          int offset = getOffset(stackEntry.getNode());
          log.trace(
              "{} ORQueue branch={} {} {}",
              indent(offset),
              i,
              stackEntry.getNode().toString(),
              trgTuples.toStringSizes());
        }
        stack.add(stackEntry);
      }
      return true;
    } else {
      return false;
    }
  }
Example #9
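  // Core evaluation loop: walks the nodes of the current segment, dispatching on node type,
  // propagating staged tuples to the next segment at each tip node, and pushing StackEntries
  // for subnetwork (RIA) and query evaluation.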
  public void eval2(
      LeftInputAdapterNode liaNode,
      PathMemory rmem,
      NetworkNode node,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      LeftTupleSets trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      Set<String> visitedRules,
      boolean processRian,
      RuleExecutor executor) {
    LeftTupleSets srcTuples;
    SegmentMemory smem = smems[smemIndex];
    while (true) {
      srcTuples = trgTuples; // the previous target is now the source
      if (log.isTraceEnabled()) {
        int offset = getOffset(node);
        log.trace(
            "{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
      }

      if (NodeTypeEnums.isTerminalNode(node)) {
        TerminalNode rtn = (TerminalNode) node;
        if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
          pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
        } else {
          pRtNode.doNode(rtn, wm, srcTuples, executor);
        }
        return;
      } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
        doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
        return;
      }

      LeftTupleSets stagedLeftTuples;
      if (node == smem.getTipNode() && smem.getFirst() != null) {
        // we are about to process the segment tip, allow it to merge insert/update/delete clashes
        // Can happen if the next segments have not yet been initialized
        stagedLeftTuples = smem.getFirst().getStagedLeftTuples();
      } else {
        stagedLeftTuples = null;
      }

      LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();

      trgTuples = new LeftTupleSets();

      if (NodeTypeEnums.isBetaNode(node)) {
        BetaNode betaNode = (BetaNode) node;

        BetaMemory bm = null;
        AccumulateMemory am = null;
        if (NodeTypeEnums.AccumulateNode == node.getType()) {
          am = (AccumulateMemory) nodeMem;
          bm = am.getBetaMemory();
        } else {
          bm = (BetaMemory) nodeMem;
        }

        if (processRian && betaNode.isRightInputIsRiaNode()) {
          // if the subnetwork is nested in this segment, it will create srcTuples containing
          // peer LeftTuples, suitable for the node in the main path.
          doRiaNode(
              wm,
              liaNode,
              rmem,
              srcTuples,
              betaNode,
              sink,
              smems,
              smemIndex,
              nodeMem,
              bm,
              stack,
              visitedRules,
              executor);
          // return here as doRiaNode queues the evaluation on the stack,
          // which is necessary to handle nested query nodes
          return;
        }

        if (!bm.getDequeu().isEmpty()) {
          // If there are no staged RightTuples, process the Deque, popping entries
          // until another insert/expiration clash
          RightTupleSets rightTuples = bm.getStagedRightTuples();
          if (rightTuples.isEmpty()) {
            // nothing staged, so now process the Deque
            Deque<RightTuple> que = bm.getDequeu();
            while (!que.isEmpty()) {
              RightTuple rightTuple = que.peekFirst();
              if (rightTuple.getPropagationContext().getType() == PropagationContext.EXPIRATION
                  &&
                  // Cannot pop an expired fact, if the insert/update has not yet been evaluated.
                  rightTuple.getStagedType() != LeftTuple.NONE) {
                break;
              }

              switch (rightTuple.getPropagationContext().getType()) {
                case PropagationContext.INSERTION:
                case PropagationContext.RULE_ADDITION:
                  rightTuples.addInsert(rightTuple);
                  break;
                case PropagationContext.MODIFICATION:
                  rightTuples.addUpdate(rightTuple);
                  break;
                case PropagationContext.DELETION:
                case PropagationContext.EXPIRATION:
                case PropagationContext.RULE_REMOVAL:
                  rightTuples.addDelete(rightTuple);
                  break;
              }
              que.removeFirst();
            }
          }

          if (!bm.getDequeu().isEmpty()) {
            // The Deque is not empty, add a StackEntry for reprocessing.
            StackEntry stackEntry =
                new StackEntry(
                    liaNode,
                    node,
                    sink,
                    rmem,
                    nodeMem,
                    smems,
                    smemIndex,
                    trgTuples,
                    visitedRules,
                    false);
            stack.add(stackEntry);
          }
        }

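        // dispatch to the matching beta-node handler (join, not, exists, accumulate)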
        switch (node.getType()) {
          case NodeTypeEnums.JoinNode:
            {
              pJoinNode.doNode(
                  (JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.NotNode:
            {
              pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.ExistsNode:
            {
              pExistsNode.doNode(
                  (ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.AccumulateNode:
            {
              pAccNode.doNode(
                  (AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
        }
      } else {
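        // non-beta nodes: eval, from, query element and conditional branch are handled here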
        switch (node.getType()) {
          case NodeTypeEnums.EvalConditionNode:
            {
              pEvalNode.doNode(
                  (EvalConditionNode) node,
                  (EvalMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.FromNode:
            {
              pFromNode.doNode(
                  (FromNode) node,
                  (FromMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.QueryElementNode:
            {
              QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;

              if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
                // no point in evaluating query element, and setting up stack, if there is nothing
                // to process
                break;
              }

              QueryElementNode qnode = (QueryElementNode) node;
              if (visitedRules == Collections.<String>emptySet()) {
                visitedRules = new HashSet<String>();
              }
              visitedRules.add(qnode.getQueryElement().getQueryName());

              // result tuples can appear when reactivity occurs inside the query, prior to evaluation;
              // special behaviour is needed to add the results again when this query result resumes
              trgTuples.addAll(qmem.getResultLeftTuples());

              if (!srcTuples.isEmpty()) {
                // only process the Query Node if there are src tuples
                StackEntry stackEntry =
                    new StackEntry(
                        liaNode,
                        node,
                        sink,
                        rmem,
                        nodeMem,
                        smems,
                        smemIndex,
                        trgTuples,
                        visitedRules,
                        true);

                stack.add(stackEntry);

                pQueryNode.doNode(
                    qnode, (QueryElementNodeMemory) nodeMem, stackEntry, sink, wm, srcTuples);

                SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
                List<PathMemory> qrmems = qsmem.getPathMemories();

                // Build the evaluation information for each 'or' branch.
                // Except for the last, place each entry on the stack; the last one is evaluated now.
                for (int i = qrmems.size() - 1; i >= 0; i--) {
                  PathMemory qrmem = qrmems.get(i);

                  rmem = qrmem;
                  smems = qrmem.getSegmentMemories();
                  smemIndex = 0;
                  smem = smems[smemIndex]; // 0
                  liaNode = (LeftInputAdapterNode) smem.getRootNode();

                  if (liaNode == smem.getTipNode()) {
                    // segment only has liaNode in it
                    // nothing is staged in the liaNode, so skip to next segment
                    smem = smems[++smemIndex]; // 1
                    node = smem.getRootNode();
                    nodeMem = smem.getNodeMemories().getFirst();
                  } else {
                    // lia is in shared segment, so point to next node
                    node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
                    nodeMem =
                        smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
                  }

                  trgTuples = smem.getStagedLeftTuples();

                  if (i != 0 && !trgTuples.isEmpty()) {
                    // All entries except the last should be placed on the stack for evaluation
                    // later.
                    stackEntry =
                        new StackEntry(
                            liaNode,
                            node,
                            null,
                            rmem,
                            nodeMem,
                            smems,
                            smemIndex,
                            trgTuples,
                            visitedRules,
                            false);
                    if (log.isTraceEnabled()) {
                      int offset = getOffset(stackEntry.getNode());
                      log.trace(
                          "{} ORQueue branch={} {} {}",
                          indent(offset),
                          i,
                          stackEntry.getNode().toString(),
                          trgTuples.toStringSizes());
                    }
                    stack.add(stackEntry);
                  }
                }
                processRian = true; //  make sure it's reset, so ria nodes are processed
                continue;
              }
              break;
            }
          case NodeTypeEnums.ConditionalBranchNode:
            {
              pBranchNode.doNode(
                  (ConditionalBranchNode) node,
                  (ConditionalBranchMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples,
                  executor);
              break;
            }
        }
      }

      if (node != smem.getTipNode()) {
        // get next node and node memory in the segment
        node = sink;
        nodeMem = nodeMem.getNext();
      } else {
        // Reached end of segment, start on new segment.
        SegmentPropagator.propagate(smem, trgTuples, wm);
        smem = smems[++smemIndex];
        trgTuples = smem.getStagedLeftTuples();
        if (log.isTraceEnabled()) {
          log.trace("Segment {}", smemIndex);
        }
        node = (LeftTupleSink) smem.getRootNode();
        nodeMem = smem.getNodeMemories().getFirst();
      }
      processRian = true; //  make sure it's reset, so ria nodes are processed
    }
  }