Example #1
  // Walks up the left-tuple source chain from the given node to the LeftInputAdapterNode,
  // counting the levels in between; the result is used as the indentation depth for trace logging.
  public static int getOffset(NetworkNode node) {
    LeftTupleSource lt;
    int offset = 1;
    if (NodeTypeEnums.isTerminalNode(node)) {
      lt = ((TerminalNode) node).getLeftTupleSource();
      offset++;
    } else if (node.getType() == NodeTypeEnums.RightInputAdaterNode) {
      lt = ((RightInputAdapterNode) node).getLeftTupleSource();
    } else {
      lt = (LeftTupleSource) node;
    }
    while (lt.getType() != NodeTypeEnums.LeftInputAdapterNode) {
      offset++;
      lt = lt.getLeftTupleSource();
    }

    return offset;
  }
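The trace statements in the examples below pass this offset to an indent helper that is not shown on this page. A minimal sketch of what such a helper could look like (the name matches the calls below, but the body is an assumption, not the actual implementation):

  // Hypothetical sketch: builds the indentation prefix consumed by the trace logging below.
  private static String indent(int offset) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < offset; i++) {
      sb.append("  "); // two spaces per level of the left-tuple source chain
    }
    return sb.toString();
  }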
Example #2
  // Dispatches srcTuples to the handler matching the concrete beta node type
  // (join, not, exists or accumulate).
  private void switchOnDoBetaNode(
      NetworkNode node,
      TupleSets<LeftTuple> trgTuples,
      InternalWorkingMemory wm,
      TupleSets<LeftTuple> srcTuples,
      TupleSets<LeftTuple> stagedLeftTuples,
      LeftTupleSinkNode sink,
      BetaMemory bm,
      AccumulateMemory am) {
    if (log.isTraceEnabled()) {
      int offset = getOffset(node);
      log.trace("{} rightTuples {}", indent(offset), bm.getStagedRightTuples().toStringSizes());
    }

    switch (node.getType()) {
      case NodeTypeEnums.JoinNode:
        {
          pJoinNode.doNode((JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
          break;
        }
      case NodeTypeEnums.NotNode:
        {
          pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
          break;
        }
      case NodeTypeEnums.ExistsNode:
        {
          pExistsNode.doNode(
              (ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
          break;
        }
      case NodeTypeEnums.AccumulateNode:
        {
          pAccNode.doNode(
              (AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
          break;
        }
    }
  }
Example #3
  // Evaluates a beta node: resolves its beta/accumulate memory, routes subnetwork (ria) input
  // through doRiaNode when required, and otherwise dispatches to switchOnDoBetaNode.
  // Returns true when the evaluation has been queued on the stack instead of completed here.
  private boolean evalBetaNode(
      LeftInputAdapterNode liaNode,
      PathMemory pmem,
      NetworkNode node,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      TupleSets<LeftTuple> trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      boolean processRian,
      RuleExecutor executor,
      TupleSets<LeftTuple> srcTuples,
      TupleSets<LeftTuple> stagedLeftTuples,
      LeftTupleSinkNode sink) {
    BetaNode betaNode = (BetaNode) node;
    BetaMemory bm;
    AccumulateMemory am = null;
    if (NodeTypeEnums.AccumulateNode == node.getType()) {
      am = (AccumulateMemory) nodeMem;
      bm = am.getBetaMemory();
    } else {
      bm = (BetaMemory) nodeMem;
    }

    if (processRian && betaNode.isRightInputIsRiaNode()) {
      // if the subnetwork is nested in this segment, it will create srcTuples containing
      // peer LeftTuples, suitable for the node in the main path.
      doRiaNode(
          wm, liaNode, pmem, srcTuples, betaNode, sink, smems, smemIndex, nodeMem, bm, stack,
          executor);
      return true; // return here: doRiaNode queues the evaluation on the stack, which is necessary
      // to handle nested query nodes
    }

    switchOnDoBetaNode(node, trgTuples, wm, srcTuples, stagedLeftTuples, sink, bm, am);

    return false;
  }
Example #4
 // Evaluates a single node of the current segment and returns the tuples to propagate to the
 // next node, or null when the evaluation was deferred to the stack and there is nothing left
 // to propagate.
 public TupleSets<LeftTuple> evalNode(
     LeftInputAdapterNode liaNode,
     PathMemory pmem,
     NetworkNode node,
     long bit,
     Memory nodeMem,
     SegmentMemory[] smems,
     int smemIndex,
     InternalWorkingMemory wm,
     LinkedList<StackEntry> stack,
     boolean processRian,
     RuleExecutor executor,
     TupleSets<LeftTuple> srcTuples,
     SegmentMemory smem,
     TupleSets<LeftTuple> stagedLeftTuples,
     LeftTupleSinkNode sink) {
   TupleSets<LeftTuple> trgTuples = new TupleSetsImpl<LeftTuple>();
   if (NodeTypeEnums.isBetaNode(node)) {
     boolean exitInnerEval =
         evalBetaNode(
             liaNode,
             pmem,
             node,
             nodeMem,
             smems,
             smemIndex,
             trgTuples,
             wm,
             stack,
             processRian,
             executor,
             srcTuples,
             stagedLeftTuples,
             sink);
     if (exitInnerEval) {
       return null;
     }
   } else {
     boolean exitInnerEval = false;
     switch (node.getType()) {
       case NodeTypeEnums.EvalConditionNode:
         {
           pEvalNode.doNode(
               (EvalConditionNode) node,
               (EvalMemory) nodeMem,
               sink,
               wm,
               srcTuples,
               trgTuples,
               stagedLeftTuples);
           break;
         }
       case NodeTypeEnums.FromNode:
         {
           pFromNode.doNode(
               (FromNode) node,
               (FromMemory) nodeMem,
               sink,
               wm,
               srcTuples,
               trgTuples,
               stagedLeftTuples);
           break;
         }
       case NodeTypeEnums.ReactiveFromNode:
         {
           pReactiveFromNode.doNode(
               (ReactiveFromNode) node,
               (ReactiveFromNode.ReactiveFromMemory) nodeMem,
               sink,
               wm,
               srcTuples,
               trgTuples,
               stagedLeftTuples);
           break;
         }
       case NodeTypeEnums.QueryElementNode:
         {
           exitInnerEval =
               evalQueryNode(
                   liaNode,
                   pmem,
                   node,
                   bit,
                   nodeMem,
                   smems,
                   smemIndex,
                   trgTuples,
                   wm,
                   stack,
                   srcTuples,
                   sink,
                   stagedLeftTuples);
           break;
         }
       case NodeTypeEnums.TimerConditionNode:
         {
           pTimerNode.doNode(
               (TimerNode) node,
               (TimerNodeMemory) nodeMem,
               pmem,
               smem,
               sink,
               wm,
               srcTuples,
               trgTuples,
               stagedLeftTuples);
           break;
         }
       case NodeTypeEnums.ConditionalBranchNode:
         {
           pBranchNode.doNode(
               (ConditionalBranchNode) node,
               (ConditionalBranchMemory) nodeMem,
               sink,
               wm,
               srcTuples,
               trgTuples,
               stagedLeftTuples,
               executor);
           break;
         }
     }
     if (exitInnerEval && trgTuples.isEmpty()) {
       return null;
     }
   }
   return trgTuples;
 }
Example #5
  // Main evaluation loop: walks the nodes of each segment, skipping clean segments and nodes,
  // evaluates each node and propagates the staged tuples into the next segment, until a terminal
  // node is reached or the evaluation is suspended on the stack.
  public void innerEval(
      LeftInputAdapterNode liaNode,
      PathMemory pmem,
      NetworkNode node,
      long bit,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      TupleSets<LeftTuple> trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      boolean processRian,
      RuleExecutor executor) {
    TupleSets<LeftTuple> srcTuples;
    SegmentMemory smem = smems[smemIndex];
    TupleSets<LeftTuple> stagedLeftTuples = null;
    while (true) {
      srcTuples = trgTuples; // the previous target is now the source
      if (log.isTraceEnabled()) {
        int offset = getOffset(node);
        log.trace(
            "{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
      }

      boolean emptySrcTuples = srcTuples.isEmpty();
      if (!(NodeTypeEnums.isBetaNode(node) && ((BetaNode) node).isRightInputIsRiaNode())) {
        // The engine cannot skip a ria node, as the dirty node might be several levels deep
        if (emptySrcTuples && smem.getDirtyNodeMask() == 0) {
          // empty sources and the segment is not dirty; skip to non-empty src tuples or a dirty segment.
          boolean foundDirty = false;
          for (int i = ++smemIndex, length = smems.length; i < length; i++) {
            if (log.isTraceEnabled()) {
              int offset = getOffset(node);
              log.trace("{} Skip Segment {}", indent(offset), i - 1);
            }

            // this is needed for subnetworks that feed into a parent network that has no
            // right inputs, and may not yet be initialized
            if (smem.isEmpty() && !NodeTypeEnums.isTerminalNode(smem.getTipNode())) {
              SegmentUtilities.createChildSegments(
                  wm, smem, ((LeftTupleSource) smem.getTipNode()).getSinkPropagator());
            }

            smem = smems[i];
            bit = 1;
            srcTuples = smem.getStagedLeftTuples().takeAll();
            emptySrcTuples = srcTuples.isEmpty();
            node = smem.getRootNode();
            nodeMem = smem.getNodeMemories().getFirst();
            if (!emptySrcTuples
                || smem.getDirtyNodeMask() != 0
                || (NodeTypeEnums.isBetaNode(node) && ((BetaNode) node).isRightInputIsRiaNode())) {
              // break if dirty or if we reach a subnetwork. It must break for subnetworks, so they
              // can be searched.
              foundDirty = true;
              smemIndex = i;
              break;
            }
          }
          if (!foundDirty) {
            break;
          }
        }
        if (log.isTraceEnabled()) {
          int offset = getOffset(node);
          log.trace("{} Segment {}", indent(offset), smemIndex);
          log.trace(
              "{} {} {} {}", indent(offset), cycle, node.toString(), srcTuples.toStringSizes());
        }
      }

      long dirtyMask = smem.getDirtyNodeMask();
      if (emptySrcTuples) {
        while ((dirtyMask & bit) == 0
            && node != smem.getTipNode()
            && !(NodeTypeEnums.isBetaNode(node) && ((BetaNode) node).isRightInputIsRiaNode())) {
          if (log.isTraceEnabled()) {
            int offset = getOffset(node);
            log.trace("{} Skip Node {}", indent(offset), node);
          }
          bit = bit << 1; // shift to check the next node
          node = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();
          nodeMem = nodeMem.getNext();
        }
      }

      if (NodeTypeEnums.isTerminalNode(node)) {
        TerminalNode rtn = (TerminalNode) node;
        if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
          pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
        } else {
          pRtNode.doNode(rtn, wm, srcTuples, executor);
        }
        break;
      } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
        doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node);
        break;
      }

      stagedLeftTuples = getTargetStagedLeftTuples(node, wm, smem);
      LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();

      trgTuples =
          evalNode(
              liaNode,
              pmem,
              node,
              bit,
              nodeMem,
              smems,
              smemIndex,
              wm,
              stack,
              processRian,
              executor,
              srcTuples,
              smem,
              stagedLeftTuples,
              sink);
      if (trgTuples == null) {
        break; // a query exists and a StackEntry has been placed; there are no current trgTuples
        // to process
      }

      if (node != smem.getTipNode()) {
        // get next node and node memory in the segment
        node = sink;
        nodeMem = nodeMem.getNext();
        bit = bit << 1;
      } else {
        // Reached end of segment, start on new segment.
        smem.getFirst().getStagedLeftTuples().addAll(stagedLeftTuples); // must put back all the LTs
        // end of SegmentMemory, so we know that stagedLeftTuples is not null
        SegmentPropagator.propagate(smem, trgTuples, wm);
        bit = 1;
        smem = smems[++smemIndex];
        trgTuples = smem.getStagedLeftTuples().takeAll();

        if (log.isTraceEnabled()) {
          int offset = getOffset(node);
          log.trace("{} Segment {}", indent(offset), smemIndex);
        }
        node = smem.getRootNode();
        nodeMem = smem.getNodeMemories().getFirst();
      }
      processRian = true; //  make sure it's reset, so ria nodes are processed
    }

    if (stagedLeftTuples != null && !stagedLeftTuples.isEmpty()) {
      smem.getFirst().getStagedLeftTuples().addAll(stagedLeftTuples); // must put back all the LTs
    }
  }
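innerEval skips clean nodes by testing each node's bit against the segment's dirty mask (smem.getDirtyNodeMask()), shifting the bit left as it advances through the segment. The following self-contained sketch (plain Java, not Drools API) illustrates that one-bit-per-node idea:

  // Standalone illustration of the per-segment dirty bitmask: each node in a segment owns one
  // bit, and a node whose bit is clear (and which has no staged tuples) can be skipped.
  final class DirtyMaskSketch {
    public static void main(String[] args) {
      long dirtyMask = 0b0100; // assume only the third node in the segment is dirty
      long bit = 1;            // bit of the segment's root node
      for (int pos = 0; pos < 4; pos++) {
        boolean dirty = (dirtyMask & bit) != 0;
        System.out.println("node " + pos + (dirty ? ": evaluate" : ": skip"));
        bit = bit << 1;        // shift to the next node's bit, as innerEval does
      }
    }
  }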
Example #6
  // Resumes an evaluation that was previously queued as a StackEntry, restoring the node,
  // node memory, segment position and bit before re-entering innerEval.
  public void evalStackEntry(
      StackEntry entry,
      LinkedList<StackEntry> stack,
      RuleExecutor executor,
      InternalWorkingMemory wm) {
    NetworkNode node = entry.getNode();
    Memory nodeMem = entry.getNodeMem();
    TupleSets<LeftTuple> trgTuples = entry.getTrgTuples();
    if (node.getType() == NodeTypeEnums.QueryElementNode) {
      // copy across the results, if any, from the query node memory
      QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;
      qmem.setNodeCleanWithoutNotify();
      trgTuples.addAll(qmem.getResultLeftTuples());
    }

    LeftTupleSinkNode sink = entry.getSink();
    PathMemory pmem = entry.getRmem();

    SegmentMemory[] smems = entry.getSmems();
    int smemIndex = entry.getSmemIndex();
    boolean processRian = entry.isProcessRian();

    long bit = entry.getBit();
    if (entry.isResumeFromNextNode()) {
      SegmentMemory smem = smems[smemIndex];
      if (node != smem.getTipNode()) {
        // get next node and node memory in the segment
        LeftTupleSink nextSink = sink.getNextLeftTupleSinkNode();
        if (nextSink == null) {
          node = sink;
        } else {
          // there is a nested subnetwork, take out path
          node = nextSink;
        }

        nodeMem = nodeMem.getNext();
        bit = bit << 1; // update bit to new node
      } else {
        // Reached end of segment, start on new segment.
        SegmentPropagator.propagate(smem, trgTuples, wm);
        smem = smems[++smemIndex];
        trgTuples = smem.getStagedLeftTuples().takeAll();
        node = smem.getRootNode();
        nodeMem = smem.getNodeMemories().getFirst();
        bit = 1; // update bit to start of new segment
      }
    }

    if (log.isTraceEnabled()) {
      int offset = getOffset(node);
      log.trace("{} Resume {} {}", indent(offset), node.toString(), trgTuples.toStringSizes());
    }
    innerEval(
        entry.getLiaNode(),
        pmem,
        node,
        bit,
        nodeMem,
        smems,
        smemIndex,
        trgTuples,
        wm,
        stack,
        processRian,
        executor);
  }
Example #7
  // Evaluation loop that walks nodes within and across segments, handling terminal, ria, beta
  // and query nodes, and queues StackEntry items for work that must be resumed later.
  public void eval2(
      LeftInputAdapterNode liaNode,
      PathMemory rmem,
      NetworkNode node,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      LeftTupleSets trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      Set<String> visitedRules,
      boolean processRian,
      RuleExecutor executor) {
    LeftTupleSets srcTuples;
    SegmentMemory smem = smems[smemIndex];
    while (true) {
      srcTuples = trgTuples; // the previous target is now the source
      if (log.isTraceEnabled()) {
        int offset = getOffset(node);
        log.trace(
            "{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
      }

      if (NodeTypeEnums.isTerminalNode(node)) {
        TerminalNode rtn = (TerminalNode) node;
        if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
          pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
        } else {
          pRtNode.doNode(rtn, wm, srcTuples, executor);
        }
        return;
      } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
        doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
        return;
      }

      LeftTupleSets stagedLeftTuples;
      if (node == smem.getTipNode() && smem.getFirst() != null) {
        // we are about to process the segment tip, allow it to merge insert/update/delete clashes
        // Can happen if the next segments have not yet been initialized
        stagedLeftTuples = smem.getFirst().getStagedLeftTuples();
      } else {
        stagedLeftTuples = null;
      }

      LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();

      trgTuples = new LeftTupleSets();

      if (NodeTypeEnums.isBetaNode(node)) {
        BetaNode betaNode = (BetaNode) node;

        BetaMemory bm = null;
        AccumulateMemory am = null;
        if (NodeTypeEnums.AccumulateNode == node.getType()) {
          am = (AccumulateMemory) nodeMem;
          bm = am.getBetaMemory();
        } else {
          bm = (BetaMemory) nodeMem;
        }

        if (processRian && betaNode.isRightInputIsRiaNode()) {
          // if the subnetwork is nested in this segment, it will create srcTuples containing
          // peer LeftTuples, suitable for the node in the main path.
          doRiaNode(
              wm,
              liaNode,
              rmem,
              srcTuples,
              betaNode,
              sink,
              smems,
              smemIndex,
              nodeMem,
              bm,
              stack,
              visitedRules,
              executor);
          return; // return here as doRiaNode queues the evaluation on the stack, which is necessary
          // to handle nested query nodes
        }

        if (!bm.getDequeu().isEmpty()) {
          // If there are no staged RightTuples, then process the Dequeue, popping entries, until
          // another insert/expiration clash
          RightTupleSets rightTuples = bm.getStagedRightTuples();
          if (rightTuples.isEmpty()) {
            // nothing staged, so now process the Deque
            Deque<RightTuple> que = bm.getDequeu();
            while (!que.isEmpty()) {
              RightTuple rightTuple = que.peekFirst();
              if (rightTuple.getPropagationContext().getType() == PropagationContext.EXPIRATION
                  &&
                  // Cannot pop an expired fact, if the insert/update has not yet been evaluated.
                  rightTuple.getStagedType() != LeftTuple.NONE) {
                break;
              }

              switch (rightTuple.getPropagationContext().getType()) {
                case PropagationContext.INSERTION:
                case PropagationContext.RULE_ADDITION:
                  rightTuples.addInsert(rightTuple);
                  break;
                case PropagationContext.MODIFICATION:
                  rightTuples.addUpdate(rightTuple);
                  break;
                case PropagationContext.DELETION:
                case PropagationContext.EXPIRATION:
                case PropagationContext.RULE_REMOVAL:
                  rightTuples.addDelete(rightTuple);
                  break;
              }
              que.removeFirst();
            }
          }

          if (!bm.getDequeu().isEmpty()) {
            // The Deque is not empty, add a StackEntry for reprocessing.
            StackEntry stackEntry =
                new StackEntry(
                    liaNode,
                    node,
                    sink,
                    rmem,
                    nodeMem,
                    smems,
                    smemIndex,
                    trgTuples,
                    visitedRules,
                    false);
            stack.add(stackEntry);
          }
        }

        switch (node.getType()) {
          case NodeTypeEnums.JoinNode:
            {
              pJoinNode.doNode(
                  (JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.NotNode:
            {
              pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.ExistsNode:
            {
              pExistsNode.doNode(
                  (ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.AccumulateNode:
            {
              pAccNode.doNode(
                  (AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
              break;
            }
        }
      } else {
        switch (node.getType()) {
          case NodeTypeEnums.EvalConditionNode:
            {
              pEvalNode.doNode(
                  (EvalConditionNode) node,
                  (EvalMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.FromNode:
            {
              pFromNode.doNode(
                  (FromNode) node,
                  (FromMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples);
              break;
            }
          case NodeTypeEnums.QueryElementNode:
            {
              QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;

              if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
                // no point in evaluating query element, and setting up stack, if there is nothing
                // to process
                break;
              }

              QueryElementNode qnode = (QueryElementNode) node;
              if (visitedRules == Collections.<String>emptySet()) {
                visitedRules = new HashSet<String>();
              }
              visitedRules.add(qnode.getQueryElement().getQueryName());

              // result tuples can happen when reactivity occurs inside of the query, prior to
              // evaluation; we will need special behaviour to add the results again when this
              // query result resumes
              trgTuples.addAll(qmem.getResultLeftTuples());

              if (!srcTuples.isEmpty()) {
                // only process the Query Node if there are src tuples
                StackEntry stackEntry =
                    new StackEntry(
                        liaNode,
                        node,
                        sink,
                        rmem,
                        nodeMem,
                        smems,
                        smemIndex,
                        trgTuples,
                        visitedRules,
                        true);

                stack.add(stackEntry);

                pQueryNode.doNode(
                    qnode, (QueryElementNodeMemory) nodeMem, stackEntry, sink, wm, srcTuples);

                SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
                List<PathMemory> qrmems = qsmem.getPathMemories();

                // Build the evaluation information for each 'or' branch.
                // Except for the last, place each entry on the stack; evaluate the last one now.
                for (int i = qrmems.size() - 1; i >= 0; i--) {
                  PathMemory qrmem = qrmems.get(i);

                  rmem = qrmem;
                  smems = qrmem.getSegmentMemories();
                  smemIndex = 0;
                  smem = smems[smemIndex]; // 0
                  liaNode = (LeftInputAdapterNode) smem.getRootNode();

                  if (liaNode == smem.getTipNode()) {
                    // segment only has liaNode in it
                    // nothing is staged in the liaNode, so skip to next segment
                    smem = smems[++smemIndex]; // 1
                    node = smem.getRootNode();
                    nodeMem = smem.getNodeMemories().getFirst();
                  } else {
                    // lia is in shared segment, so point to next node
                    node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
                    nodeMem =
                        smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
                  }

                  trgTuples = smem.getStagedLeftTuples();

                  if (i != 0 && !trgTuples.isEmpty()) {
                    // All entries except the last should be placed on the stack for evaluation
                    // later.
                    stackEntry =
                        new StackEntry(
                            liaNode,
                            node,
                            null,
                            rmem,
                            nodeMem,
                            smems,
                            smemIndex,
                            trgTuples,
                            visitedRules,
                            false);
                    if (log.isTraceEnabled()) {
                      int offset = getOffset(stackEntry.getNode());
                      log.trace(
                          "{} ORQueue branch={} {} {}",
                          indent(offset),
                          i,
                          stackEntry.getNode().toString(),
                          trgTuples.toStringSizes());
                    }
                    stack.add(stackEntry);
                  }
                }
                processRian = true; //  make sure it's reset, so ria nodes are processed
                continue;
              }
              break;
            }
          case NodeTypeEnums.ConditionalBranchNode:
            {
              pBranchNode.doNode(
                  (ConditionalBranchNode) node,
                  (ConditionalBranchMemory) nodeMem,
                  sink,
                  wm,
                  srcTuples,
                  trgTuples,
                  stagedLeftTuples,
                  executor);
              break;
            }
        }
      }

      if (node != smem.getTipNode()) {
        // get next node and node memory in the segment
        node = sink;
        nodeMem = nodeMem.getNext();
      } else {
        // Reached end of segment, start on new segment.
        SegmentPropagator.propagate(smem, trgTuples, wm);
        smem = smems[++smemIndex];
        trgTuples = smem.getStagedLeftTuples();
        if (log.isTraceEnabled()) {
          log.trace("Segment {}", smemIndex);
        }
        node = (LeftTupleSink) smem.getRootNode();
        nodeMem = smem.getNodeMemories().getFirst();
      }
      processRian = true; //  make sure it's reset, so ria nodes are processed
    }
  }
Example #8
  // Drives eval2 in a loop, popping and resuming queued StackEntry items until the stack is empty.
  public void eval1(
      LeftInputAdapterNode liaNode,
      PathMemory rmem,
      NetworkNode node,
      Memory nodeMem,
      SegmentMemory[] smems,
      int smemIndex,
      LeftTupleSets trgTuples,
      InternalWorkingMemory wm,
      LinkedList<StackEntry> stack,
      Set<String> visitedRules,
      boolean processRian,
      RuleExecutor executor) {
    while (true) {
      eval2(
          liaNode,
          rmem,
          node,
          nodeMem,
          smems,
          smemIndex,
          trgTuples,
          wm,
          stack,
          visitedRules,
          processRian,
          executor);

      // resume from the stack, if any entries were queued
      if (!stack.isEmpty()) {
        StackEntry entry = stack.removeLast();

        node = entry.getNode();
        nodeMem = entry.getNodeMem();
        trgTuples = entry.getTrgTuples();
        if (node.getType() == NodeTypeEnums.QueryElementNode) {
          // copy across the results, if any, from the query node memory
          trgTuples.addAll(((QueryElementNodeMemory) nodeMem).getResultLeftTuples());
        }

        LeftTupleSinkNode sink = entry.getSink();
        rmem = entry.getRmem();

        smems = entry.getSmems();
        smemIndex = entry.getSmemIndex();
        visitedRules = entry.getVisitedRules();
        if (NodeTypeEnums.isBetaNode(node)) {
          // queued beta nodes do not want their ria node evaluated, otherwise there is recursion
          processRian = false;
        } else {
          processRian = true;
        }

        if (entry.isResumeFromNextNode()) {
          SegmentMemory smem = smems[smemIndex];
          if (node != smem.getTipNode()) {
            // get next node and node memory in the segment
            LeftTupleSink nextSink = sink.getNextLeftTupleSinkNode();
            if (nextSink == null) {
              node = sink;
            } else {
              // there is a nested subnetwork, take out path
              node = nextSink;
            }

            nodeMem = nodeMem.getNext();
          } else {
            // Reached end of segment, start on new segment.
            SegmentPropagator.propagate(smem, trgTuples, wm);
            smem = smems[++smemIndex];
            trgTuples = smem.getStagedLeftTuples();
            node = (LeftTupleSink) smem.getRootNode();
            nodeMem = smem.getNodeMemories().getFirst();
          }
        }

        if (log.isTraceEnabled()) {
          int offset = getOffset(node);
          log.trace("{} Resume {} {}", indent(offset), node.toString(), trgTuples.toStringSizes());
        }
      } else {
        return; // stack is empty, nothing left to resume
      }
    }
  }
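Taken together, eval1/eval2 and evalStackEntry/innerEval implement a stack-driven evaluation: a path is evaluated until it either finishes or defers work by pushing a StackEntry, and the driver then pops entries and resumes them until the stack is empty. A minimal, self-contained sketch of that control pattern (generic Java, not the Drools types used above):

  // Minimal sketch of the evaluate / suspend / resume pattern driven by the stack above.
  final class StackDrivenEvalSketch {
    static final class Frame {
      final String node; // placeholder for node, memory and segment position
      Frame(String node) { this.node = node; }
    }

    // Evaluates one path; pushes a Frame when part of the work must be resumed later.
    static void evalPath(Frame frame, java.util.Deque<Frame> stack) {
      // ... walk the nodes of the path, possibly calling stack.addLast(new Frame(...)) ...
    }

    static void evaluate(Frame start) {
      java.util.Deque<Frame> stack = new java.util.ArrayDeque<>();
      Frame current = start;
      while (true) {
        evalPath(current, stack);     // may suspend by pushing frames
        if (stack.isEmpty()) {
          return;                     // nothing left to resume
        }
        current = stack.removeLast(); // resume the most recently suspended frame
      }
    }
  }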