public void doRightDeletes(NotNode notNode,
                           LeftTupleSink sink,
                           BetaMemory bm,
                           InternalWorkingMemory wm,
                           RightTupleSets srcRightTuples,
                           LeftTupleSets trgLeftTuples) {
    LeftTupleMemory ltm = bm.getLeftTupleMemory();
    RightTupleMemory rtm = bm.getRightTupleMemory();
    ContextEntry[] contextEntry = bm.getContext();
    BetaConstraints constraints = notNode.getRawConstraints();

    for (RightTuple rightTuple = srcRightTuples.getDeleteFirst(); rightTuple != null; ) {
        RightTuple next = rightTuple.getStagedNext();

        FastIterator it = notNode.getRightIterator(rtm);

        // assign now, so we can remove from memory before doing any possible propagations
        boolean useComparisonIndex = rtm.getIndexType().isComparison();
        RightTuple rootBlocker = useComparisonIndex ? null : (RightTuple) it.next(rightTuple);

        if (rightTuple.getMemory() != null) {
            // it may have been staged and never actually added
            rtm.remove(rightTuple);
        }

        if (rightTuple.getBlocked() != null) {
            for (LeftTuple leftTuple = rightTuple.getBlocked(); leftTuple != null; ) {
                LeftTuple temp = leftTuple.getBlockedNext();

                leftTuple.clearBlocker();

                if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
                    // ignore, as it will get processed via left iteration. Children cannot be processed twice
                    leftTuple = temp;
                    continue;
                }

                constraints.updateFromTuple(contextEntry, wm, leftTuple);

                if (useComparisonIndex) {
                    rootBlocker = rtm.getFirst(leftTuple, null, it);
                }

                // we know that older tuples have been checked so continue next
                for (RightTuple newBlocker = rootBlocker; newBlocker != null; newBlocker = (RightTuple) it.next(newBlocker)) {
                    if (constraints.isAllowedCachedLeft(contextEntry, newBlocker.getFactHandle())) {
                        leftTuple.setBlocker(newBlocker);
                        newBlocker.addBlocked(leftTuple);
                        break;
                    }
                }

                if (leftTuple.getBlocker() == null) {
                    // was previously blocked and not in memory, so add it back
                    ltm.add(leftTuple);

                    trgLeftTuples.addInsert(sink.createLeftTuple(leftTuple, sink, rightTuple.getPropagationContext(), true));
                }

                leftTuple = temp;
            }
        }

        rightTuple.nullBlocked();
        rightTuple.clearStaged();
        rightTuple = next;
    }

    constraints.resetTuple(contextEntry);
}
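
/**
 * Handles staged right inserts for a NotNode. Each inserted RightTuple is added to the
 * right memory and matched against the unblocked LeftTuples still held in the left memory;
 * every match becomes blocked (and is removed from the left memory), and any child tuple it
 * had previously propagated is retracted.
 */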
public void doRightInserts(NotNode notNode,
                           BetaMemory bm,
                           InternalWorkingMemory wm,
                           RightTupleSets srcRightTuples,
                           LeftTupleSets trgLeftTuples,
                           LeftTupleSets stagedLeftTuples) {
    LeftTupleMemory ltm = bm.getLeftTupleMemory();
    RightTupleMemory rtm = bm.getRightTupleMemory();
    ContextEntry[] contextEntry = bm.getContext();
    BetaConstraints constraints = notNode.getRawConstraints();

    // this must be processed here, rather than initial insert, as we need to link the blocker
    unlinkNotNodeOnRightInsert(notNode, bm, wm);

    for (RightTuple rightTuple = srcRightTuples.getInsertFirst(); rightTuple != null; ) {
        RightTuple next = rightTuple.getStagedNext();

        rtm.add(rightTuple);

        if (ltm == null || ltm.size() == 0) {
            // do nothing here, as no left memory
            rightTuple.clearStaged();
            rightTuple = next;
            continue;
        }

        FastIterator it = notNode.getLeftIterator(ltm);
        PropagationContext context = rightTuple.getPropagationContext();

        constraints.updateFromFactHandle(contextEntry, wm, rightTuple.getFactHandle());
        for (LeftTuple leftTuple = notNode.getFirstLeftTuple(rightTuple, ltm, context, it); leftTuple != null; ) {
            // preserve next now, in case we remove this leftTuple
            LeftTuple temp = (LeftTuple) it.next(leftTuple);

            if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
                // ignore, as it will get processed via left iteration. Children cannot be processed twice
                leftTuple = temp;
                continue;
            }

            // we know that only unblocked LeftTuples are still in the memory
            if (constraints.isAllowedCachedRight(contextEntry, leftTuple)) {
                leftTuple.setBlocker(rightTuple);
                rightTuple.addBlocked(leftTuple);

                // this is now blocked so remove from memory
                ltm.remove(leftTuple);

                // subclasses like ForallNotNode might override this propagation
                // ** @TODO (mdp) need to not break forall
                LeftTuple childLeftTuple = leftTuple.getFirstChild();
                if (childLeftTuple != null) {
                    // NotNode only has one child
                    childLeftTuple.setPropagationContext(rightTuple.getPropagationContext());
                    RuleNetworkEvaluator.deleteLeftChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
                }
            }

            leftTuple = temp;
        }
        rightTuple.clearStaged();
        rightTuple = next;
    }

    constraints.resetFactHandle(contextEntry);
}
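
/**
 * Handles staged right updates for a NotNode. The updated RightTuple is first matched against
 * the unblocked LeftTuples in the left memory, blocking any new matches. Then every LeftTuple
 * it previously blocked (held in the temp blocked list) is re-evaluated against the right
 * memory to find a new blocker; tuples left unblocked are re-added to the left memory and
 * propagated as inserts.
 */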
public void doRightUpdates(NotNode notNode,
                           LeftTupleSink sink,
                           BetaMemory bm,
                           InternalWorkingMemory wm,
                           RightTupleSets srcRightTuples,
                           LeftTupleSets trgLeftTuples,
                           LeftTupleSets stagedLeftTuples) {
    LeftTupleMemory ltm = bm.getLeftTupleMemory();
    RightTupleMemory rtm = bm.getRightTupleMemory();
    ContextEntry[] contextEntry = bm.getContext();
    BetaConstraints constraints = notNode.getRawConstraints();

    boolean iterateFromStart = notNode.isIndexedUnificationJoin() || rtm.getIndexType().isComparison();

    for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
        RightTuple next = rightTuple.getStagedNext();
        PropagationContext context = rightTuple.getPropagationContext();

        constraints.updateFromFactHandle(contextEntry, wm, rightTuple.getFactHandle());

        FastIterator leftIt = notNode.getLeftIterator(ltm);
        LeftTuple firstLeftTuple = notNode.getFirstLeftTuple(rightTuple, ltm, context, leftIt);

        LeftTuple firstBlocked = rightTuple.getTempBlocked();

        // first process non-blocked tuples, as we know only those ones are in the left memory.
        for (LeftTuple leftTuple = firstLeftTuple; leftTuple != null; ) {
            // preserve next now, in case we remove this leftTuple
            LeftTuple temp = (LeftTuple) leftIt.next(leftTuple);

            if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
                // ignore, as it will get processed via left iteration. Children cannot be processed twice
                leftTuple = temp;
                continue;
            }

            // we know that only unblocked LeftTuples are still in the memory
            if (constraints.isAllowedCachedRight(contextEntry, leftTuple)) {
                leftTuple.setBlocker(rightTuple);
                rightTuple.addBlocked(leftTuple);

                // this is now blocked so remove from memory
                ltm.remove(leftTuple);

                LeftTuple childLeftTuple = leftTuple.getFirstChild();
                if (childLeftTuple != null) {
                    childLeftTuple.setPropagationContext(rightTuple.getPropagationContext());
                    RuleNetworkEvaluator.deleteRightChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
                }
            }

            leftTuple = temp;
        }

        if (firstBlocked != null) {
            RightTuple rootBlocker = rightTuple.getTempNextRightTuple();
            if (rootBlocker == null) {
                iterateFromStart = true;
            }

            FastIterator rightIt = notNode.getRightIterator(rtm);

            // iterate all the existing previously blocked LeftTuples
            for (LeftTuple leftTuple = firstBlocked; leftTuple != null; ) {
                LeftTuple temp = leftTuple.getBlockedNext();

                leftTuple.clearBlocker();

                if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
                    // ignore, as it will get processed via left iteration. Children cannot be
                    // processed twice, but we need to add it back into the list first
                    leftTuple.setBlocker(rightTuple);
                    rightTuple.addBlocked(leftTuple);

                    leftTuple = temp;
                    continue;
                }

                constraints.updateFromTuple(contextEntry, wm, leftTuple);

                if (iterateFromStart) {
                    rootBlocker = notNode.getFirstRightTuple(leftTuple, rtm, null, rightIt);
                }

                // we know that older tuples have been checked so continue next
                for (RightTuple newBlocker = rootBlocker; newBlocker != null; newBlocker = (RightTuple) rightIt.next(newBlocker)) {
                    // cannot select a RightTuple queued in the delete list.
                    // There may be UPDATE RightTuples too, but that's ok. They've already been
                    // re-added to the correct bucket, so they are safe to be reprocessed.
                    if (leftTuple.getStagedType() != LeftTuple.DELETE
                            && newBlocker.getStagedType() != LeftTuple.DELETE
                            && constraints.isAllowedCachedLeft(contextEntry, newBlocker.getFactHandle())) {
                        leftTuple.setBlocker(newBlocker);
                        newBlocker.addBlocked(leftTuple);
                        break;
                    }
                }

                if (leftTuple.getBlocker() == null) {
                    // was previously blocked and not in memory, so add it back
                    ltm.add(leftTuple);

                    // subclasses like ForallNotNode might override this propagation
                    trgLeftTuples.addInsert(sink.createLeftTuple(leftTuple, sink, rightTuple.getPropagationContext(), true));
                }

                leftTuple = temp;
            }
        }

        rightTuple.clearStaged();
        rightTuple = next;
    }

    constraints.resetFactHandle(contextEntry);
    constraints.resetTuple(contextEntry);
}
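
/**
 * Core network-evaluation loop. Walks the current segment node by node, feeding each node's
 * target tuples in as the next node's source tuples, and crosses into the next segment once
 * the segment tip is reached. Terminal nodes and RightInputAdapterNodes end the walk; query
 * elements and beta nodes with a non-empty deque push a StackEntry so that evaluation can
 * resume later.
 */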
public void eval2(LeftInputAdapterNode liaNode,
                  PathMemory rmem,
                  NetworkNode node,
                  Memory nodeMem,
                  SegmentMemory[] smems,
                  int smemIndex,
                  LeftTupleSets trgTuples,
                  InternalWorkingMemory wm,
                  LinkedList<StackEntry> stack,
                  Set<String> visitedRules,
                  boolean processRian,
                  RuleExecutor executor) {
    LeftTupleSets srcTuples;
    SegmentMemory smem = smems[smemIndex];
    while (true) {
        srcTuples = trgTuples; // the previous target is now the source
        if (log.isTraceEnabled()) {
            int offset = getOffset(node);
            log.trace("{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
        }

        if (NodeTypeEnums.isTerminalNode(node)) {
            TerminalNode rtn = (TerminalNode) node;
            if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
                pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
            } else {
                pRtNode.doNode(rtn, wm, srcTuples, executor);
            }
            return;
        } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
            doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
            return;
        }

        LeftTupleSets stagedLeftTuples;
        if (node == smem.getTipNode() && smem.getFirst() != null) {
            // we are about to process the segment tip, allow it to merge insert/update/delete clashes.
            // Can happen if the next segments have not yet been initialized
            stagedLeftTuples = smem.getFirst().getStagedLeftTuples();
        } else {
            stagedLeftTuples = null;
        }

        LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();

        trgTuples = new LeftTupleSets();

        if (NodeTypeEnums.isBetaNode(node)) {
            BetaNode betaNode = (BetaNode) node;

            BetaMemory bm = null;
            AccumulateMemory am = null;
            if (NodeTypeEnums.AccumulateNode == node.getType()) {
                am = (AccumulateMemory) nodeMem;
                bm = am.getBetaMemory();
            } else {
                bm = (BetaMemory) nodeMem;
            }

            if (processRian && betaNode.isRightInputIsRiaNode()) {
                // if the subnetwork is nested in this segment, it will create srcTuples containing
                // peer LeftTuples, suitable for the node in the main path.
                doRiaNode(wm, liaNode, rmem, srcTuples, betaNode, sink, smems, smemIndex, nodeMem, bm, stack, visitedRules, executor);
                return; // return here, as doRiaNode queues the evaluation on the stack, which is
                        // necessary to handle nested query nodes
            }

            if (!bm.getDequeu().isEmpty()) {
                // If there are no staged RightTuples, then process the Deque, popping entries,
                // until another insert/expiration clash
                RightTupleSets rightTuples = bm.getStagedRightTuples();
                if (rightTuples.isEmpty()) {
                    // nothing staged, so now process the Deque
                    Deque<RightTuple> que = bm.getDequeu();
                    while (!que.isEmpty()) {
                        RightTuple rightTuple = que.peekFirst();
                        if (rightTuple.getPropagationContext().getType() == PropagationContext.EXPIRATION
                                // Cannot pop an expired fact, if the insert/update has not yet been evaluated.
                                && rightTuple.getStagedType() != LeftTuple.NONE) {
                            break;
                        }

                        switch (rightTuple.getPropagationContext().getType()) {
                            case PropagationContext.INSERTION:
                            case PropagationContext.RULE_ADDITION:
                                rightTuples.addInsert(rightTuple);
                                break;
                            case PropagationContext.MODIFICATION:
                                rightTuples.addUpdate(rightTuple);
                                break;
                            case PropagationContext.DELETION:
                            case PropagationContext.EXPIRATION:
                            case PropagationContext.RULE_REMOVAL:
                                rightTuples.addDelete(rightTuple);
                                break;
                        }
                        que.removeFirst();
                    }
                }

                if (!bm.getDequeu().isEmpty()) {
                    // The Deque is not empty, so add a StackEntry for reprocessing.
                    StackEntry stackEntry = new StackEntry(liaNode, node, sink, rmem, nodeMem, smems, smemIndex, trgTuples, visitedRules, false);
                    stack.add(stackEntry);
                }
            }

            switch (node.getType()) {
                case NodeTypeEnums.JoinNode: {
                    pJoinNode.doNode((JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.NotNode: {
                    pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.ExistsNode: {
                    pExistsNode.doNode((ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.AccumulateNode: {
                    pAccNode.doNode((AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
            }
        } else {
            switch (node.getType()) {
                case NodeTypeEnums.EvalConditionNode: {
                    pEvalNode.doNode((EvalConditionNode) node, (EvalMemory) nodeMem, sink, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.FromNode: {
                    pFromNode.doNode((FromNode) node, (FromMemory) nodeMem, sink, wm, srcTuples, trgTuples, stagedLeftTuples);
                    break;
                }
                case NodeTypeEnums.QueryElementNode: {
                    QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;

                    if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
                        // no point in evaluating the query element, and setting up the stack,
                        // if there is nothing to process
                        break;
                    }

                    QueryElementNode qnode = (QueryElementNode) node;
                    if (visitedRules == Collections.<String>emptySet()) {
                        visitedRules = new HashSet<String>();
                    }
                    visitedRules.add(qnode.getQueryElement().getQueryName());

                    // result tuples can happen when reactivity occurs inside of the query, prior to
                    // evaluation. We will need special behaviour to add the results again, when this
                    // query result resumes
                    trgTuples.addAll(qmem.getResultLeftTuples());

                    if (!srcTuples.isEmpty()) {
                        // only process the Query Node if there are src tuples
                        StackEntry stackEntry = new StackEntry(liaNode, node, sink, rmem, nodeMem, smems, smemIndex, trgTuples, visitedRules, true);

                        stack.add(stackEntry);

                        pQueryNode.doNode(qnode, (QueryElementNodeMemory) nodeMem, stackEntry, sink, wm, srcTuples);

                        SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
                        List<PathMemory> qrmems = qsmem.getPathMemories();

                        // Build the evaluation information for each 'or' branch.
                        // Except for the last, place each entry on the stack; evaluate the last one now.
                        for (int i = qrmems.size() - 1; i >= 0; i--) {
                            PathMemory qrmem = qrmems.get(i);

                            rmem = qrmem;
                            smems = qrmem.getSegmentMemories();
                            smemIndex = 0;
                            smem = smems[smemIndex]; // 0
                            liaNode = (LeftInputAdapterNode) smem.getRootNode();

                            if (liaNode == smem.getTipNode()) {
                                // segment only has liaNode in it.
                                // nothing is staged in the liaNode, so skip to the next segment
                                smem = smems[++smemIndex]; // 1
                                node = smem.getRootNode();
                                nodeMem = smem.getNodeMemories().getFirst();
                            } else {
                                // lia is in a shared segment, so point to the next node
                                node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
                                nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
                            }

                            trgTuples = smem.getStagedLeftTuples();

                            if (i != 0 && !trgTuples.isEmpty()) {
                                // All entries except the last should be placed on the stack for evaluation later.
                                stackEntry = new StackEntry(liaNode, node, null, rmem, nodeMem, smems, smemIndex, trgTuples, visitedRules, false);
                                if (log.isTraceEnabled()) {
                                    int offset = getOffset(stackEntry.getNode());
                                    log.trace("{} ORQueue branch={} {} {}", indent(offset), i, stackEntry.getNode().toString(), trgTuples.toStringSizes());
                                }
                                stack.add(stackEntry);
                            }
                        }
                        processRian = true; // make sure it's reset, so ria nodes are processed
                        continue;
                    }
                    break;
                }
                case NodeTypeEnums.ConditionalBranchNode: {
                    pBranchNode.doNode((ConditionalBranchNode) node, (ConditionalBranchMemory) nodeMem, sink, wm, srcTuples, trgTuples, stagedLeftTuples, executor);
                    break;
                }
            }
        }

        if (node != smem.getTipNode()) {
            // get the next node and node memory in the segment
            node = sink;
            nodeMem = nodeMem.getNext();
        } else {
            // Reached the end of the segment, start on a new segment.
            SegmentPropagator.propagate(smem, trgTuples, wm);
            smem = smems[++smemIndex];
            trgTuples = smem.getStagedLeftTuples();
            if (log.isTraceEnabled()) {
                log.trace("Segment {}", smemIndex);
            }
            node = (LeftTupleSink) smem.getRootNode();
            nodeMem = smem.getNodeMemories().getFirst();
        }
        processRian = true; // make sure it's reset, so ria nodes are processed
    }
}