public static void findLeftTupleBlocker(
    BetaNode betaNode,
    TupleMemory rtm,
    ContextEntry[] contextEntry,
    BetaConstraints constraints,
    LeftTuple leftTuple,
    FastIterator it,
    boolean useLeftMemory) {
  // This method will also remove rightTuples that come from a subnetwork when no left memory is
  // used
  for (RightTuple rightTuple = betaNode.getFirstRightTuple(leftTuple, rtm, null, it);
      rightTuple != null; ) {
    RightTuple nextRight = (RightTuple) it.next(rightTuple);
    if (constraints.isAllowedCachedLeft(contextEntry, rightTuple.getFactHandle())) {
      leftTuple.setBlocker(rightTuple);
      if (useLeftMemory) {
        rightTuple.addBlocked(leftTuple);
        break;
      } else if (betaNode.isRightInputIsRiaNode()) {
        // If we aren't using leftMemory and the right input is a RIAN, then we must iterate and
        // find all subnetwork right tuples and remove them, so we don't break
        rtm.remove(rightTuple);
      } else {
        break;
      }
    }
    rightTuple = nextRight;
  }
}
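// Aside: a minimal, self-contained sketch of the blocker idea the method above relies on. All
// names below are illustrative stand-ins, not Drools API: a left tuple records the first matching
// right fact as its "blocker", so later changes only force a re-search when that particular
// blocker goes away, instead of re-evaluating the whole join each time.
import java.util.List;
import java.util.function.Predicate;

final class BlockerSketch {
  static final class Fact {
    final int value;

    Fact(int v) {
      value = v;
    }
  }

  static final class SimpleLeftTuple {
    Fact blocker; // first matching right fact, if any
  }

  // Scan the candidates and stop at the first one satisfying the constraint.
  static void findBlocker(SimpleLeftTuple left, List<Fact> candidates, Predicate<Fact> allowed) {
    left.blocker = null;
    for (Fact f : candidates) {
      if (allowed.test(f)) {
        left.blocker = f; // remember the match instead of re-joining on every change
        return;
      }
    }
  }

  public static void main(String[] args) {
    SimpleLeftTuple lt = new SimpleLeftTuple();
    findBlocker(lt, List.of(new Fact(1), new Fact(5)), f -> f.value > 3);
    System.out.println(lt.blocker != null); // true: blocked by Fact(5)
  }
}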
public void doLeftDeletes(
    BetaMemory bm,
    LeftTupleSets srcLeftTuples,
    LeftTupleSets trgLeftTuples,
    LeftTupleSets stagedLeftTuples) {
  LeftTupleMemory ltm = bm.getLeftTupleMemory();
  for (LeftTuple leftTuple = srcLeftTuples.getDeleteFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    RightTuple blocker = leftTuple.getBlocker();
    if (blocker == null) {
      if (leftTuple.getMemory() != null) {
        // it may have been staged and never actually added
        ltm.remove(leftTuple);
      }
      LeftTuple childLeftTuple = leftTuple.getFirstChild();
      if (childLeftTuple != null) { // NotNode only has one child
        childLeftTuple.setPropagationContext(leftTuple.getPropagationContext());
        RuleNetworkEvaluator.deleteLeftChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
        // no need to update pctx, as no right is available, and pctx will
        // exist on a parent LeftTuple anyway
      }
    } else {
      blocker.removeBlocked(leftTuple);
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
}
public void updateSink(
    final ObjectSink sink,
    final PropagationContext context,
    final InternalWorkingMemory workingMemory) {
  BetaNode betaNode = (BetaNode) this.sink.getSinks()[0];
  Memory betaMemory = workingMemory.getNodeMemory(betaNode);
  BetaMemory bm;
  if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
    bm = ((AccumulateMemory) betaMemory).getBetaMemory();
  } else {
    bm = (BetaMemory) betaMemory;
  }
  if (bm.getRightTupleMemory().size() > 0) {
    // iterate over all propagated handles and assert them to the new sink
    final org.drools.core.util.Iterator it = bm.getRightTupleMemory().iterator();
    for (RightTuple entry = (RightTuple) it.next();
        entry != null;
        entry = (RightTuple) it.next()) {
      LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
      InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
      sink.assertObject(handle, context, workingMemory);
    }
  }
}
public static void propagate(
    LeftTupleSink sink,
    Tuple leftTuple,
    RightTuple rightTuple,
    BetaConstraints betaConstraints,
    PropagationContext propagationContext,
    ContextEntry[] context,
    boolean useLeftMemory,
    TupleSets<LeftTuple> trgLeftTuples,
    TupleSets<LeftTuple> stagedLeftTuples) {
  if (betaConstraints.isAllowedCachedLeft(context, rightTuple.getFactHandle())) {
    if (rightTuple.getFirstChild() == null) {
      // this is a new match, so propagate as assert
      LeftTuple childLeftTuple =
          sink.createLeftTuple((LeftTuple) leftTuple, rightTuple, null, null, sink, useLeftMemory);
      childLeftTuple.setPropagationContext(propagationContext);
      trgLeftTuples.addInsert(childLeftTuple);
    } else {
      LeftTuple childLeftTuple = rightTuple.getFirstChild();
      childLeftTuple.setPropagationContext(propagationContext);
      updateChildLeftTuple(childLeftTuple, stagedLeftTuples, trgLeftTuples);
    }
  } else {
    deleteChildLeftTuple(
        propagationContext, trgLeftTuples, stagedLeftTuples, rightTuple.getFirstChild());
  }
}
public static void doUpdatesExistentialReorderLeftMemory(
    BetaMemory bm, TupleSets<LeftTuple> srcLeftTuples) {
  TupleMemory ltm = bm.getLeftTupleMemory();
  // sides must first be re-ordered, to ensure iteration integrity
  for (LeftTuple leftTuple = srcLeftTuples.getUpdateFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    if (leftTuple.getMemory() != null) {
      ltm.remove(leftTuple);
    }
    leftTuple = next;
  }
  for (LeftTuple leftTuple = srcLeftTuples.getUpdateFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    RightTuple blocker = leftTuple.getBlocker();
    if (blocker == null) {
      ltm.add(leftTuple);
      for (LeftTuple childLeftTuple = leftTuple.getFirstChild(); childLeftTuple != null; ) {
        LeftTuple childNext = childLeftTuple.getHandleNext();
        childLeftTuple.reAddRight();
        childLeftTuple = childNext;
      }
    } else if (blocker.getStagedType() != LeftTuple.NONE) {
      // its blocker is also being updated, so remove to force it to start from the beginning
      blocker.removeBlocked(leftTuple);
    }
    leftTuple = next;
  }
}
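// Aside: why the method above removes every updated tuple in one pass and re-adds in a second.
// When the index key is derived from mutable fact data, updating in place would leave the entry
// filed under a stale bucket. A hypothetical hash-bucket memory, not the Drools TupleMemory:
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class ReorderSketch {
  static final class Tuple {
    int key; // index key derived from mutable fact data

    Tuple(int k) {
      key = k;
    }
  }

  static final class IndexedMemory {
    final Map<Integer, Set<Tuple>> buckets = new HashMap<>();

    void add(Tuple t) {
      buckets.computeIfAbsent(t.key, k -> new HashSet<>()).add(t);
    }

    // must run while the tuple still carries the key it was inserted under
    void remove(Tuple t) {
      Set<Tuple> bucket = buckets.get(t.key);
      if (bucket != null) {
        bucket.remove(t);
      }
    }
  }

  public static void main(String[] args) {
    IndexedMemory mem = new IndexedMemory();
    Tuple t = new Tuple(1);
    mem.add(t);
    mem.remove(t); // pass 1: remove while the old key is still valid
    t.key = 2; // the underlying fact changed, so the index key changed
    mem.add(t); // pass 2: re-add under the new key; no stale bucket is ever visible
    System.out.println(mem.buckets.get(2).contains(t)); // true
  }
}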
public Entry[] toArray() {
  FastIterator it = tree.fastIterator();
  if (it == null) {
    return new Entry[0];
  }
  List<Comparable> toBeRemoved = new ArrayList<Comparable>();
  List<RightTuple> result = new ArrayList<RightTuple>();
  RBTree.Node<Comparable<Comparable>, RightTupleList> node;
  while ((node = (RBTree.Node<Comparable<Comparable>, RightTupleList>) it.next(null)) != null) {
    RightTupleList bucket = node.value;
    if (bucket.size() == 0) {
      // defer bucket removal until the iteration is complete
      toBeRemoved.add(node.key);
    } else {
      RightTuple entry = bucket.getFirst();
      while (entry != null) {
        result.add(entry);
        entry = (RightTuple) entry.getNext();
      }
    }
  }
  for (Comparable key : toBeRemoved) {
    tree.delete(key);
  }
  // the collected entries are RightTuples, so the target array must be a RightTuple[]
  return result.toArray(new RightTuple[result.size()]);
}
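// Aside: the deferred-delete pattern used by toArray() above, reduced to a TreeMap sketch: empty
// buckets are collected during iteration and deleted afterwards, because deleting mid-iteration
// could invalidate the tree iterator. Illustrative only, not the RBTree API.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

final class DeferredDeleteSketch {
  public static void main(String[] args) {
    TreeMap<Integer, List<String>> tree = new TreeMap<>();
    tree.put(1, new ArrayList<>()); // empty bucket left behind by earlier removals
    tree.put(2, new ArrayList<>(List.of("a", "b"))); // live bucket

    List<Integer> toBeRemoved = new ArrayList<>();
    List<String> result = new ArrayList<>();
    for (Map.Entry<Integer, List<String>> e : tree.entrySet()) {
      if (e.getValue().isEmpty()) {
        toBeRemoved.add(e.getKey()); // defer the structural change
      } else {
        result.addAll(e.getValue());
      }
    }
    for (Integer key : toBeRemoved) {
      tree.remove(key); // safe now: the iteration is finished
    }
    System.out.println(result + " " + tree.keySet()); // [a, b] [2]
  }
}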
public static void doUpdatesReorderRightMemory(
    BetaMemory bm, TupleSets<RightTuple> srcRightTuples) {
  TupleMemory rtm = bm.getRightTupleMemory();
  for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    if (rightTuple.getMemory() != null) {
      rightTuple.setTempRightTupleMemory(rightTuple.getMemory());
      rtm.remove(rightTuple);
    }
    rightTuple = next;
  }
  for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    if (rightTuple.getTempRightTupleMemory() != null) {
      rtm.add(rightTuple);
      for (LeftTuple childLeftTuple = rightTuple.getFirstChild(); childLeftTuple != null; ) {
        LeftTuple childNext = childLeftTuple.getRightParentNext();
        childLeftTuple.reAddLeft();
        childLeftTuple = childNext;
      }
    }
    rightTuple = next;
  }
}
private static ProtobufMessages.NodeMemory writeRIANodeMemory(
    final int nodeId,
    final MarshallerWriteContext context,
    final BaseNode node,
    final NodeMemories memories,
    final Memory memory) {
  RightInputAdapterNode riaNode = (RightInputAdapterNode) node;
  ObjectSink[] sinks = riaNode.getSinkPropagator().getSinks();
  BetaNode betaNode = (BetaNode) sinks[0];
  Memory betaMemory = memories.peekNodeMemory(betaNode.getId());
  if (betaMemory == null) {
    return null;
  }
  BetaMemory bm;
  if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
    bm = ((AccumulateMemory) betaMemory).getBetaMemory();
  } else {
    bm = (BetaMemory) betaMemory;
  }
  // for RIA nodes, we need to store the ID of the created handles
  if (bm.getRightTupleMemory().size() > 0) {
    ProtobufMessages.NodeMemory.RIANodeMemory.Builder _ria =
        ProtobufMessages.NodeMemory.RIANodeMemory.newBuilder();
    // iterate over all propagated handles and store the tuple and result-handle IDs
    final org.drools.core.util.Iterator it = bm.getRightTupleMemory().iterator();
    for (RightTuple entry = (RightTuple) it.next();
        entry != null;
        entry = (RightTuple) it.next()) {
      LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
      InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
      FactHandle _handle =
          ProtobufMessages.FactHandle.newBuilder()
              .setId(handle.getId())
              .setRecency(handle.getRecency())
              .build();
      _ria.addContext(
          ProtobufMessages.NodeMemory.RIANodeMemory.RIAContext.newBuilder()
              .setTuple(PersisterHelper.createTuple(leftTuple))
              .setResultHandle(_handle)
              .build());
    }
    return ProtobufMessages.NodeMemory.newBuilder()
        .setNodeId(nodeId)
        .setNodeType(ProtobufMessages.NodeMemory.NodeType.RIA)
        .setRia(_ria.build())
        .build();
  }
  return null;
}
public static void unlinkCreatedHandles(final LeftTuple leftTuple) {
  Map<Object, RightTuple> matches = (Map<Object, RightTuple>) leftTuple.getContextObject();
  FastIterator rightIt = LinkedList.fastIterator;
  for (RightTuple rightTuple : matches.values()) {
    for (RightTuple current = rightTuple; current != null; ) {
      RightTuple next = (RightTuple) rightIt.next(current);
      current.unlinkFromRightParent();
      current = next;
    }
  }
}
public void modifyLeftTuple(
    LeftTuple leftTuple, PropagationContext context, InternalWorkingMemory workingMemory) {
  // retrieve the fact handle that was created for this leftTuple
  InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
  // propagate the modification to every right tuple created from that handle
  for (RightTuple rightTuple = handle.getFirstRightTuple();
      rightTuple != null;
      rightTuple = rightTuple.getHandleNext()) {
    rightTuple.getRightTupleSink().modifyRightTuple(rightTuple, context, workingMemory);
  }
}
public Entry next(Entry object) {
  if (object == null) {
    return null;
  }
  RightTuple rightTuple = (RightTuple) object;
  RightTuple next = (RightTuple) rightTuple.getNext();
  if (next != null) {
    return next;
  }
  Comparable key = getRightIndexedValue(rightTuple);
  next = getNext(key, false);
  return next == null ? null : checkUpperBound(next, upperBound);
}
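// Aside: a sketch of the stepping logic in next() above: exhaust the current bucket's linked
// list, then move to the next key in the sorted index, stopping at the range's upper bound.
// TreeMap stands in for the comparison index; all names are illustrative.
import java.util.TreeMap;

final class RangeIteratorSketch {
  public static void main(String[] args) {
    TreeMap<Integer, String> index = new TreeMap<>();
    index.put(10, "a");
    index.put(20, "b");
    index.put(30, "c");

    int upperBound = 25;
    Integer key = 10;
    while (key != null) {
      System.out.println(index.get(key));
      Integer nextKey = index.higherKey(key); // move to the next bucket in key order
      key = (nextKey != null && nextKey <= upperBound) ? nextKey : null; // enforce the bound
    }
    // prints a, b — 30 lies beyond the upper bound
  }
}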
public static void checkConstraintsAndPropagate(
    final LeftTupleSink sink,
    final LeftTuple leftTuple,
    final RightTuple rightTuple,
    final AlphaNodeFieldConstraint[] alphaConstraints,
    final BetaConstraints betaConstraints,
    final PropagationContext propagationContext,
    final InternalWorkingMemory wm,
    final FromMemory fm,
    final ContextEntry[] context,
    final boolean useLeftMemory,
    TupleSets<LeftTuple> trgLeftTuples,
    TupleSets<LeftTuple> stagedLeftTuples) {
  if (isAllowed(rightTuple.getFactHandle(), alphaConstraints, wm, fm)) {
    propagate(
        sink,
        leftTuple,
        rightTuple,
        betaConstraints,
        propagationContext,
        context,
        useLeftMemory,
        trgLeftTuples,
        stagedLeftTuples);
  }
}
private static ProtobufMessages.NodeMemory writeQueryElementNodeMemory(
    final int nodeId, final Memory memory, final InternalWorkingMemory wm) {
  org.drools.core.util.Iterator<LeftTuple> it =
      LeftTupleIterator.iterator(wm, ((QueryElementNodeMemory) memory).getNode());
  ProtobufMessages.NodeMemory.QueryElementNodeMemory.Builder _query =
      ProtobufMessages.NodeMemory.QueryElementNodeMemory.newBuilder();
  for (LeftTuple leftTuple = it.next(); leftTuple != null; leftTuple = it.next()) {
    InternalFactHandle handle = (InternalFactHandle) leftTuple.getObject();
    FactHandle _handle =
        ProtobufMessages.FactHandle.newBuilder()
            .setId(handle.getId())
            .setRecency(handle.getRecency())
            .build();
    ProtobufMessages.NodeMemory.QueryElementNodeMemory.QueryContext.Builder _context =
        ProtobufMessages.NodeMemory.QueryElementNodeMemory.QueryContext.newBuilder()
            .setTuple(PersisterHelper.createTuple(leftTuple))
            .setHandle(_handle);
    LeftTuple childLeftTuple = leftTuple.getFirstChild();
    while (childLeftTuple != null) {
      RightTuple rightParent = childLeftTuple.getRightParent();
      _context.addResult(
          ProtobufMessages.FactHandle.newBuilder()
              .setId(rightParent.getFactHandle().getId())
              .setRecency(rightParent.getFactHandle().getRecency())
              .build());
      while (childLeftTuple != null && childLeftTuple.getRightParent() == rightParent) {
        // skip to the next child that has a different right parent
        childLeftTuple = childLeftTuple.getLeftParentNext();
      }
    }
    _query.addContext(_context.build());
  }
  return _query.getContextCount() > 0
      ? ProtobufMessages.NodeMemory.newBuilder()
          .setNodeId(nodeId)
          .setNodeType(ProtobufMessages.NodeMemory.NodeType.QUERY_ELEMENT)
          .setQueryElement(_query.build())
          .build()
      : null;
}
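// Aside: the grouping walk in writeQueryElementNodeMemory above, reduced: adjacent children that
// share a right parent are recorded once, by skipping past the run of siblings with that parent.
// Plain arrays stand in for the child-tuple list; all names are illustrative.
import java.util.ArrayList;
import java.util.List;

final class GroupByParentSketch {
  public static void main(String[] args) {
    // child -> parent id; adjacent children with equal parents form one result
    int[] parentOf = {7, 7, 9, 9, 9, 4};
    List<Integer> distinctParents = new ArrayList<>();
    int i = 0;
    while (i < parentOf.length) {
      int parent = parentOf[i];
      distinctParents.add(parent); // record this result once
      while (i < parentOf.length && parentOf[i] == parent) {
        i++; // skip siblings sharing the same right parent
      }
    }
    System.out.println(distinctParents); // [7, 9, 4]
  }
}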
/** Retracts the corresponding tuple by retrieving and retracting the fact created for it. */
public void retractLeftTuple(
    final LeftTuple tuple,
    final PropagationContext context,
    final InternalWorkingMemory workingMemory) {
  // retrieve handle from memory
  final InternalFactHandle factHandle = (InternalFactHandle) tuple.getObject();
  for (RightTuple rightTuple = factHandle.getFirstRightTuple();
      rightTuple != null;
      rightTuple = rightTuple.getHandleNext()) {
    rightTuple.getRightTupleSink().retractRightTuple(rightTuple, context, workingMemory);
  }
  factHandle.clearRightTuples();
  for (LeftTuple leftTuple = factHandle.getLastLeftTuple();
      leftTuple != null;
      leftTuple = leftTuple.getLeftParentNext()) {
    leftTuple.getLeftTupleSink().retractLeftTuple(leftTuple, context, workingMemory);
  }
  factHandle.clearLeftTuples();
}
public void removeMemory(InternalWorkingMemory workingMemory) {
  BetaNode betaNode = (BetaNode) this.sink.getSinks()[0];
  Memory betaMemory = workingMemory.getNodeMemory(betaNode);
  BetaMemory bm;
  if (betaNode.getType() == NodeTypeEnums.AccumulateNode) {
    bm = ((AccumulateMemory) betaMemory).getBetaMemory();
  } else {
    bm = (BetaMemory) betaMemory;
  }
  if (bm.getRightTupleMemory().size() > 0) {
    final Iterator it = bm.getRightTupleMemory().iterator();
    for (RightTuple entry = (RightTuple) it.next();
        entry != null;
        entry = (RightTuple) it.next()) {
      LeftTuple leftTuple = (LeftTuple) entry.getFactHandle().getObject();
      leftTuple.unlinkFromLeftParent();
      leftTuple.unlinkFromRightParent();
    }
  }
  workingMemory.clearNodeMemory(this);
}
@SuppressWarnings("unchecked")
private static ProtobufMessages.NodeMemory writeFromNodeMemory(
    final int nodeId, final Memory memory) {
  FromMemory fromMemory = (FromMemory) memory;
  if (fromMemory.betaMemory.getLeftTupleMemory().size() > 0) {
    ProtobufMessages.NodeMemory.FromNodeMemory.Builder _from =
        ProtobufMessages.NodeMemory.FromNodeMemory.newBuilder();
    final org.drools.core.util.Iterator tupleIter =
        fromMemory.betaMemory.getLeftTupleMemory().iterator();
    for (LeftTuple leftTuple = (LeftTuple) tupleIter.next();
        leftTuple != null;
        leftTuple = (LeftTuple) tupleIter.next()) {
      Map<Object, RightTuple> matches = (Map<Object, RightTuple>) leftTuple.getObject();
      ProtobufMessages.NodeMemory.FromNodeMemory.FromContext.Builder _context =
          ProtobufMessages.NodeMemory.FromNodeMemory.FromContext.newBuilder()
              .setTuple(PersisterHelper.createTuple(leftTuple));
      for (RightTuple rightTuple : matches.values()) {
        FactHandle _handle =
            ProtobufMessages.FactHandle.newBuilder()
                .setId(rightTuple.getFactHandle().getId())
                .setRecency(rightTuple.getFactHandle().getRecency())
                .build();
        _context.addHandle(_handle);
      }
      _from.addContext(_context.build());
    }
    return ProtobufMessages.NodeMemory.newBuilder()
        .setNodeId(nodeId)
        .setNodeType(ProtobufMessages.NodeMemory.NodeType.FROM)
        .setFrom(_from.build())
        .build();
  }
  return null;
}
public synchronized void gcStreamQueue() {
  List<TupleEntry> nonNormalizedDeletes = flushStreamQueue();
  for (TupleEntry tupleEntry : nonNormalizedDeletes) {
    if (tupleEntry.getLeftTuple() != null) {
      LeftTuple leftTuple = tupleEntry.getLeftTuple();
      if (leftTuple.getMemory() != null) {
        if (tupleEntry.getNodeMemory() instanceof BetaMemory) {
          ((BetaMemory) tupleEntry.getNodeMemory()).getLeftTupleMemory().remove(leftTuple);
        } else {
          leftTuple.getMemory().remove(leftTuple);
        }
      }
    } else {
      RightTuple rightTuple = tupleEntry.getRightTuple();
      if (rightTuple.getMemory() != null) {
        if (tupleEntry.getNodeMemory() instanceof BetaMemory) {
          ((BetaMemory) tupleEntry.getNodeMemory()).getRightTupleMemory().remove(rightTuple);
        } else {
          rightTuple.getMemory().remove(rightTuple);
        }
      }
    }
  }
}
private Comparable getRightIndexedValue(RightTuple rightTuple) {
  return (Comparable)
      ascendingIndex.getExtractor().getValue(rightTuple.getFactHandle().getObject());
}
public static void doUpdatesExistentialReorderRightMemory(
    BetaMemory bm, BetaNode betaNode, TupleSets<RightTuple> srcRightTuples) {
  TupleMemory rtm = bm.getRightTupleMemory();
  boolean resumeFromCurrent =
      !(betaNode.isIndexedUnificationJoin() || rtm.getIndexType().isComparison());
  // remove all the staged rightTuples from the memory before re-adding them all;
  // this avoids splitting a bucket when an updated rightTuple hasn't been moved yet
  // and so is the first entry in the wrong bucket
  for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    if (rightTuple.getMemory() != null) {
      rightTuple.setTempRightTupleMemory(rightTuple.getMemory());
      if (resumeFromCurrent) {
        if (rightTuple.getBlocked() != null) {
          // look for a non-staged right tuple, first forward ...
          RightTuple tempRightTuple = (RightTuple) rightTuple.getNext();
          while (tempRightTuple != null && tempRightTuple.getStagedType() != LeftTuple.NONE) {
            // next cannot be an updated or deleted rightTuple
            tempRightTuple = (RightTuple) tempRightTuple.getNext();
          }
          // ... and if one cannot be found, try backward
          if (tempRightTuple == null) {
            tempRightTuple = (RightTuple) rightTuple.getPrevious();
            while (tempRightTuple != null && tempRightTuple.getStagedType() != LeftTuple.NONE) {
              // previous cannot be an updated or deleted rightTuple either
              tempRightTuple = (RightTuple) tempRightTuple.getPrevious();
            }
          }
          rightTuple.setTempNextRightTuple(tempRightTuple);
        }
      }
      rightTuple.setTempBlocked(rightTuple.getBlocked());
      rightTuple.setBlocked(null);
      rtm.remove(rightTuple);
    }
    rightTuple = next;
  }
  for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    if (rightTuple.getTempRightTupleMemory() != null) {
      rtm.add(rightTuple);
      if (resumeFromCurrent) {
        RightTuple tempRightTuple = rightTuple.getTempNextRightTuple();
        if (rightTuple.getBlocked() != null
            && tempRightTuple == null
            && rightTuple.getMemory() == rightTuple.getTempRightTupleMemory()) {
          // the next RightTuple was null, but the current RightTuple was added back into the
          // same bucket, so reset it as the root blocker so that re-matching can be attempted
          rightTuple.setTempNextRightTuple(rightTuple);
        }
      }
      for (LeftTuple childLeftTuple = rightTuple.getFirstChild(); childLeftTuple != null; ) {
        LeftTuple childNext = childLeftTuple.getRightParentNext();
        childLeftTuple.reAddLeft();
        childLeftTuple = childNext;
      }
    }
    rightTuple = next;
  }
}
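// Aside: the forward-then-backward neighbor search above, reduced to a doubly linked list: find
// the nearest non-staged neighbor so blocked tuples can resume matching from roughly the same
// position instead of restarting from scratch. Illustrative names only, not Drools API.
final class NeighborSearchSketch {
  static final class Node {
    Node prev, next;
    final boolean staged;
    final String name;

    Node(String n, boolean s) {
      name = n;
      staged = s;
    }
  }

  static Node findResumePoint(Node from) {
    Node n = from.next;
    while (n != null && n.staged) {
      n = n.next; // forward first: skip staged (updated/deleted) neighbors
    }
    if (n == null) {
      n = from.prev;
      while (n != null && n.staged) {
        n = n.prev; // then backward, with the same rule
      }
    }
    return n;
  }

  public static void main(String[] args) {
    Node a = new Node("a", false), b = new Node("b", true), c = new Node("c", true);
    a.next = b;
    b.prev = a;
    b.next = c;
    c.prev = b;
    Node r = findResumePoint(b);
    System.out.println(r == null ? "none" : r.name); // a — nothing non-staged forward, so backward
  }
}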
public void doRightDeletes(
    NotNode notNode,
    LeftTupleSink sink,
    BetaMemory bm,
    InternalWorkingMemory wm,
    RightTupleSets srcRightTuples,
    LeftTupleSets trgLeftTuples) {
  LeftTupleMemory ltm = bm.getLeftTupleMemory();
  RightTupleMemory rtm = bm.getRightTupleMemory();
  ContextEntry[] contextEntry = bm.getContext();
  BetaConstraints constraints = notNode.getRawConstraints();
  for (RightTuple rightTuple = srcRightTuples.getDeleteFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    FastIterator it = notNode.getRightIterator(rtm);
    // assign now, so we can remove from memory before doing any possible propagations
    boolean useComparisonIndex = rtm.getIndexType().isComparison();
    RightTuple rootBlocker = useComparisonIndex ? null : (RightTuple) it.next(rightTuple);
    if (rightTuple.getMemory() != null) {
      // it may have been staged and never actually added
      rtm.remove(rightTuple);
    }
    if (rightTuple.getBlocked() != null) {
      for (LeftTuple leftTuple = rightTuple.getBlocked(); leftTuple != null; ) {
        LeftTuple temp = leftTuple.getBlockedNext();
        leftTuple.clearBlocker();
        if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
          // ignore, as it will get processed via left iteration. Children cannot be processed
          // twice
          leftTuple = temp;
          continue;
        }
        constraints.updateFromTuple(contextEntry, wm, leftTuple);
        if (useComparisonIndex) {
          rootBlocker = rtm.getFirst(leftTuple, null, it);
        }
        // we know that older tuples have been checked, so continue with the next
        for (RightTuple newBlocker = rootBlocker;
            newBlocker != null;
            newBlocker = (RightTuple) it.next(newBlocker)) {
          if (constraints.isAllowedCachedLeft(contextEntry, newBlocker.getFactHandle())) {
            leftTuple.setBlocker(newBlocker);
            newBlocker.addBlocked(leftTuple);
            break;
          }
        }
        if (leftTuple.getBlocker() == null) {
          // was previously blocked and not in memory, so add
          ltm.add(leftTuple);
          trgLeftTuples.addInsert(
              sink.createLeftTuple(leftTuple, sink, rightTuple.getPropagationContext(), true));
        }
        leftTuple = temp;
      }
    }
    rightTuple.nullBlocked();
    rightTuple.clearStaged();
    rightTuple = next;
  }
  constraints.resetTuple(contextEntry);
}
public void eval2(
    LeftInputAdapterNode liaNode,
    PathMemory rmem,
    NetworkNode node,
    Memory nodeMem,
    SegmentMemory[] smems,
    int smemIndex,
    LeftTupleSets trgTuples,
    InternalWorkingMemory wm,
    LinkedList<StackEntry> stack,
    Set<String> visitedRules,
    boolean processRian,
    RuleExecutor executor) {
  LeftTupleSets srcTuples;
  SegmentMemory smem = smems[smemIndex];
  while (true) {
    srcTuples = trgTuples; // the previous target is now the source
    if (log.isTraceEnabled()) {
      int offset = getOffset(node);
      log.trace(
          "{} {} {} {}", indent(offset), ++cycle, node.toString(), srcTuples.toStringSizes());
    }
    if (NodeTypeEnums.isTerminalNode(node)) {
      TerminalNode rtn = (TerminalNode) node;
      if (node.getType() == NodeTypeEnums.QueryTerminalNode) {
        pQtNode.doNode((QueryTerminalNode) rtn, wm, srcTuples, stack);
      } else {
        pRtNode.doNode(rtn, wm, srcTuples, executor);
      }
      return;
    } else if (NodeTypeEnums.RightInputAdaterNode == node.getType()) {
      doRiaNode2(wm, srcTuples, (RightInputAdapterNode) node, stack);
      return;
    }
    LeftTupleSets stagedLeftTuples;
    if (node == smem.getTipNode() && smem.getFirst() != null) {
      // we are about to process the segment tip, allow it to merge insert/update/delete clashes.
      // Can happen if the next segments have not yet been initialized
      stagedLeftTuples = smem.getFirst().getStagedLeftTuples();
    } else {
      stagedLeftTuples = null;
    }
    LeftTupleSinkNode sink = ((LeftTupleSource) node).getSinkPropagator().getFirstLeftTupleSink();
    trgTuples = new LeftTupleSets();
    if (NodeTypeEnums.isBetaNode(node)) {
      BetaNode betaNode = (BetaNode) node;
      BetaMemory bm = null;
      AccumulateMemory am = null;
      if (NodeTypeEnums.AccumulateNode == node.getType()) {
        am = (AccumulateMemory) nodeMem;
        bm = am.getBetaMemory();
      } else {
        bm = (BetaMemory) nodeMem;
      }
      if (processRian && betaNode.isRightInputIsRiaNode()) {
        // if the subnetwork is nested in this segment, it will create srcTuples containing
        // peer LeftTuples, suitable for the node in the main path.
        doRiaNode(
            wm, liaNode, rmem, srcTuples, betaNode, sink, smems, smemIndex, nodeMem, bm, stack,
            visitedRules, executor);
        return; // return here, as doRiaNode queues the evaluation on the stack, which is
        // necessary to handle nested query nodes
      }
      if (!bm.getDequeu().isEmpty()) {
        // If there are no staged RightTuples, then process the deque, popping entries until
        // another insert/expiration clash
        RightTupleSets rightTuples = bm.getStagedRightTuples();
        if (rightTuples.isEmpty()) {
          // nothing staged, so now process the deque
          Deque<RightTuple> que = bm.getDequeu();
          while (!que.isEmpty()) {
            RightTuple rightTuple = que.peekFirst();
            if (rightTuple.getPropagationContext().getType() == PropagationContext.EXPIRATION
                // Cannot pop an expired fact if the insert/update has not yet been evaluated.
                && rightTuple.getStagedType() != LeftTuple.NONE) {
              break;
            }
            switch (rightTuple.getPropagationContext().getType()) {
              case PropagationContext.INSERTION:
              case PropagationContext.RULE_ADDITION:
                rightTuples.addInsert(rightTuple);
                break;
              case PropagationContext.MODIFICATION:
                rightTuples.addUpdate(rightTuple);
                break;
              case PropagationContext.DELETION:
              case PropagationContext.EXPIRATION:
              case PropagationContext.RULE_REMOVAL:
                rightTuples.addDelete(rightTuple);
                break;
            }
            que.removeFirst();
          }
        }
        if (!bm.getDequeu().isEmpty()) {
          // The deque is not empty, so add a StackEntry for reprocessing.
          StackEntry stackEntry =
              new StackEntry(
                  liaNode, node, sink, rmem, nodeMem, smems, smemIndex, trgTuples, visitedRules,
                  false);
          stack.add(stackEntry);
        }
      }
      switch (node.getType()) {
        case NodeTypeEnums.JoinNode:
          {
            pJoinNode.doNode((JoinNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
            break;
          }
        case NodeTypeEnums.NotNode:
          {
            pNotNode.doNode((NotNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
            break;
          }
        case NodeTypeEnums.ExistsNode:
          {
            pExistsNode.doNode(
                (ExistsNode) node, sink, bm, wm, srcTuples, trgTuples, stagedLeftTuples);
            break;
          }
        case NodeTypeEnums.AccumulateNode:
          {
            pAccNode.doNode(
                (AccumulateNode) node, sink, am, wm, srcTuples, trgTuples, stagedLeftTuples);
            break;
          }
      }
    } else {
      switch (node.getType()) {
        case NodeTypeEnums.EvalConditionNode:
          {
            pEvalNode.doNode(
                (EvalConditionNode) node,
                (EvalMemory) nodeMem,
                sink,
                wm,
                srcTuples,
                trgTuples,
                stagedLeftTuples);
            break;
          }
        case NodeTypeEnums.FromNode:
          {
            pFromNode.doNode(
                (FromNode) node,
                (FromMemory) nodeMem,
                sink,
                wm,
                srcTuples,
                trgTuples,
                stagedLeftTuples);
            break;
          }
        case NodeTypeEnums.QueryElementNode:
          {
            QueryElementNodeMemory qmem = (QueryElementNodeMemory) nodeMem;
            if (srcTuples.isEmpty() && qmem.getResultLeftTuples().isEmpty()) {
              // no point in evaluating the query element, and setting up the stack, if there is
              // nothing to process
              break;
            }
            QueryElementNode qnode = (QueryElementNode) node;
            if (visitedRules == Collections.<String>emptySet()) {
              visitedRules = new HashSet<String>();
            }
            visitedRules.add(qnode.getQueryElement().getQueryName());
            // result tuples can happen when reactivity occurs inside of the query, prior to
            // evaluation; we will need special behaviour to add the results again, when this
            // query result resumes
            trgTuples.addAll(qmem.getResultLeftTuples());
            if (!srcTuples.isEmpty()) {
              // only process the query node if there are src tuples
              StackEntry stackEntry =
                  new StackEntry(
                      liaNode, node, sink, rmem, nodeMem, smems, smemIndex, trgTuples,
                      visitedRules, true);
              stack.add(stackEntry);
              pQueryNode.doNode(
                  qnode, (QueryElementNodeMemory) nodeMem, stackEntry, sink, wm, srcTuples);
              SegmentMemory qsmem = ((QueryElementNodeMemory) nodeMem).getQuerySegmentMemory();
              List<PathMemory> qrmems = qsmem.getPathMemories();
              // Build the evaluation information for each 'or' branch.
              // Except for the last, place each entry on the stack; evaluate the last one now.
              for (int i = qrmems.size() - 1; i >= 0; i--) {
                PathMemory qrmem = qrmems.get(i);
                rmem = qrmem;
                smems = qrmem.getSegmentMemories();
                smemIndex = 0;
                smem = smems[smemIndex]; // 0
                liaNode = (LeftInputAdapterNode) smem.getRootNode();
                if (liaNode == smem.getTipNode()) {
                  // the segment only has the liaNode in it;
                  // nothing is staged in the liaNode, so skip to the next segment
                  smem = smems[++smemIndex]; // 1
                  node = smem.getRootNode();
                  nodeMem = smem.getNodeMemories().getFirst();
                } else {
                  // lia is in a shared segment, so point to the next node
                  node = liaNode.getSinkPropagator().getFirstLeftTupleSink();
                  nodeMem = smem.getNodeMemories().getFirst().getNext(); // skip the liaNode memory
                }
                trgTuples = smem.getStagedLeftTuples();
                if (i != 0 && !trgTuples.isEmpty()) {
                  // All entries except the last should be placed on the stack for evaluation
                  // later.
                  stackEntry =
                      new StackEntry(
                          liaNode, node, null, rmem, nodeMem, smems, smemIndex, trgTuples,
                          visitedRules, false);
                  if (log.isTraceEnabled()) {
                    int offset = getOffset(stackEntry.getNode());
                    log.trace(
                        "{} ORQueue branch={} {} {}",
                        indent(offset),
                        i,
                        stackEntry.getNode().toString(),
                        trgTuples.toStringSizes());
                  }
                  stack.add(stackEntry);
                }
              }
              processRian = true; // make sure it's reset, so ria nodes are processed
              continue;
            }
            break;
          }
        case NodeTypeEnums.ConditionalBranchNode:
          {
            pBranchNode.doNode(
                (ConditionalBranchNode) node,
                (ConditionalBranchMemory) nodeMem,
                sink,
                wm,
                srcTuples,
                trgTuples,
                stagedLeftTuples,
                executor);
            break;
          }
      }
    }
    if (node != smem.getTipNode()) {
      // get the next node and node memory in the segment
      node = sink;
      nodeMem = nodeMem.getNext();
    } else {
      // Reached the end of the segment, start on a new segment.
      SegmentPropagator.propagate(smem, trgTuples, wm);
      smem = smems[++smemIndex];
      trgTuples = smem.getStagedLeftTuples();
      if (log.isTraceEnabled()) {
        log.trace("Segment {}", smemIndex);
      }
      node = (LeftTupleSink) smem.getRootNode();
      nodeMem = smem.getNodeMemories().getFirst();
    }
    processRian = true; // make sure it's reset, so ria nodes are processed
  }
}
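// Aside: the queue-draining step in eval2 above, reduced: pop queued events into staged lists by
// propagation type, but stop at an expiration whose matching insert has not been evaluated yet.
// Event and Kind are hypothetical stand-ins for the RightTuple and PropagationContext types.
import java.util.ArrayDeque;
import java.util.Deque;

final class DequeDrainSketch {
  enum Kind {
    INSERTION,
    MODIFICATION,
    EXPIRATION
  }

  static final class Event {
    final Kind kind;
    final boolean staged; // true if a prior insert/update is still unevaluated

    Event(Kind k, boolean s) {
      kind = k;
      staged = s;
    }
  }

  public static void main(String[] args) {
    Deque<Event> que = new ArrayDeque<>();
    que.add(new Event(Kind.INSERTION, false));
    que.add(new Event(Kind.EXPIRATION, true)); // cannot pop this one yet

    int drained = 0;
    while (!que.isEmpty()) {
      Event e = que.peekFirst();
      if (e.kind == Kind.EXPIRATION && e.staged) {
        break; // the insert must be evaluated before its expiration is popped
      }
      // routing by type into insert/update/delete staging lists would happen here
      que.removeFirst();
      drained++;
    }
    System.out.println(drained + " drained, " + que.size() + " left"); // 1 drained, 1 left
  }
}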
public void doRightUpdates(
    NotNode notNode,
    LeftTupleSink sink,
    BetaMemory bm,
    InternalWorkingMemory wm,
    RightTupleSets srcRightTuples,
    LeftTupleSets trgLeftTuples,
    LeftTupleSets stagedLeftTuples) {
  LeftTupleMemory ltm = bm.getLeftTupleMemory();
  RightTupleMemory rtm = bm.getRightTupleMemory();
  ContextEntry[] contextEntry = bm.getContext();
  BetaConstraints constraints = notNode.getRawConstraints();
  boolean iterateFromStart =
      notNode.isIndexedUnificationJoin() || rtm.getIndexType().isComparison();
  for (RightTuple rightTuple = srcRightTuples.getUpdateFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    PropagationContext context = rightTuple.getPropagationContext();
    constraints.updateFromFactHandle(contextEntry, wm, rightTuple.getFactHandle());
    FastIterator leftIt = notNode.getLeftIterator(ltm);
    LeftTuple firstLeftTuple = notNode.getFirstLeftTuple(rightTuple, ltm, context, leftIt);
    LeftTuple firstBlocked = rightTuple.getTempBlocked();
    // first process the non-blocked tuples, as we know only those are in the left memory
    for (LeftTuple leftTuple = firstLeftTuple; leftTuple != null; ) {
      // preserve next now, in case we remove this leftTuple
      LeftTuple temp = (LeftTuple) leftIt.next(leftTuple);
      if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
        // ignore, as it will get processed via left iteration. Children cannot be processed twice
        leftTuple = temp;
        continue;
      }
      // we know that only unblocked LeftTuples are still in the memory
      if (constraints.isAllowedCachedRight(contextEntry, leftTuple)) {
        leftTuple.setBlocker(rightTuple);
        rightTuple.addBlocked(leftTuple);
        // this is now blocked so remove from memory
        ltm.remove(leftTuple);
        LeftTuple childLeftTuple = leftTuple.getFirstChild();
        if (childLeftTuple != null) {
          childLeftTuple.setPropagationContext(rightTuple.getPropagationContext());
          RuleNetworkEvaluator.deleteRightChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
        }
      }
      leftTuple = temp;
    }
    if (firstBlocked != null) {
      RightTuple rootBlocker = rightTuple.getTempNextRightTuple();
      if (rootBlocker == null) {
        iterateFromStart = true;
      }
      FastIterator rightIt = notNode.getRightIterator(rtm);
      // iterate all the previously blocked LeftTuples
      for (LeftTuple leftTuple = firstBlocked; leftTuple != null; ) {
        LeftTuple temp = leftTuple.getBlockedNext();
        leftTuple.clearBlocker();
        if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
          // ignore, as it will get processed via left iteration. Children cannot be processed
          // twice, but we need to add it back into the blocked list first
          leftTuple.setBlocker(rightTuple);
          rightTuple.addBlocked(leftTuple);
          leftTuple = temp;
          continue;
        }
        constraints.updateFromTuple(contextEntry, wm, leftTuple);
        if (iterateFromStart) {
          rootBlocker = notNode.getFirstRightTuple(leftTuple, rtm, null, rightIt);
        }
        // we know that older tuples have been checked, so continue with the next
        for (RightTuple newBlocker = rootBlocker;
            newBlocker != null;
            newBlocker = (RightTuple) rightIt.next(newBlocker)) {
          // cannot select a RightTuple queued in the delete list.
          // There may be UPDATE RightTuples too, but that's ok: they've already been re-added
          // to the correct bucket, so they are safe to reprocess.
          if (leftTuple.getStagedType() != LeftTuple.DELETE
              && newBlocker.getStagedType() != LeftTuple.DELETE
              && constraints.isAllowedCachedLeft(contextEntry, newBlocker.getFactHandle())) {
            leftTuple.setBlocker(newBlocker);
            newBlocker.addBlocked(leftTuple);
            break;
          }
        }
        if (leftTuple.getBlocker() == null) {
          // was previously blocked and not in memory, so add
          ltm.add(leftTuple);
          // subclasses like ForallNotNode might override this propagation
          trgLeftTuples.addInsert(
              sink.createLeftTuple(leftTuple, sink, rightTuple.getPropagationContext(), true));
        }
        leftTuple = temp;
      }
    }
    rightTuple.clearStaged();
    rightTuple = next;
  }
  constraints.resetFactHandle(contextEntry);
  constraints.resetTuple(contextEntry);
}
public void doLeftUpdates(
    NotNode notNode,
    LeftTupleSink sink,
    BetaMemory bm,
    InternalWorkingMemory wm,
    LeftTupleSets srcLeftTuples,
    LeftTupleSets trgLeftTuples,
    LeftTupleSets stagedLeftTuples) {
  LeftTupleMemory ltm = bm.getLeftTupleMemory();
  RightTupleMemory rtm = bm.getRightTupleMemory();
  ContextEntry[] contextEntry = bm.getContext();
  BetaConstraints constraints = notNode.getRawConstraints();
  boolean leftUpdateOptimizationAllowed = notNode.isLeftUpdateOptimizationAllowed();
  for (LeftTuple leftTuple = srcLeftTuples.getUpdateFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    FastIterator rightIt = notNode.getRightIterator(rtm);
    RightTuple firstRightTuple = notNode.getFirstRightTuple(leftTuple, rtm, null, rightIt);
    // If in memory, remove it, because we'll need to add it anyway if it's not blocked, to
    // ensure iteration order
    RightTuple blocker = leftTuple.getBlocker();
    if (blocker == null) {
      if (leftTuple.getMemory() != null) {
        // memory can be null, if the blocker was deleted in the same do loop
        ltm.remove(leftTuple);
      }
    } else {
      // check if we changed bucket
      if (rtm.isIndexed() && !rightIt.isFullIterator()) {
        // if firstRightTuple is null, we assume there was a bucket change and that bucket is
        // empty
        if (firstRightTuple == null || firstRightTuple.getMemory() != blocker.getMemory()) {
          blocker.removeBlocked(leftTuple);
          blocker = null;
        }
      }
    }
    constraints.updateFromTuple(contextEntry, wm, leftTuple);
    if (!leftUpdateOptimizationAllowed && blocker != null) {
      blocker.removeBlocked(leftTuple);
      blocker = null;
    }
    // if we were not blocked before (or changed buckets), or the previous blocker no longer
    // blocks, then find the next blocker
    if (blocker == null
        || !constraints.isAllowedCachedLeft(contextEntry, blocker.getFactHandle())) {
      if (blocker != null) {
        // remove the previous blocker if it exists, as we know it doesn't block any more
        blocker.removeBlocked(leftTuple);
      }
      // find the first blocker; because it's a modify, we need to start from the beginning again
      for (RightTuple newBlocker = firstRightTuple;
          newBlocker != null;
          newBlocker = (RightTuple) rightIt.next(newBlocker)) {
        if (constraints.isAllowedCachedLeft(contextEntry, newBlocker.getFactHandle())) {
          leftTuple.setBlocker(newBlocker);
          newBlocker.addBlocked(leftTuple);
          break;
        }
      }
      LeftTuple childLeftTuple = leftTuple.getFirstChild();
      if (leftTuple.getBlocker() != null) {
        // blocked
        if (childLeftTuple != null) {
          // blocked, with previous children, so it cannot have been blocked before, so retract.
          // No need to remove from memory, as we removed it at the start; it's now blocked, so
          // it must not be matched against
          childLeftTuple.setPropagationContext(
              leftTuple.getBlocker().getPropagationContext()); // we have the rightTuple, so use
          // it for the pctx
          RuleNetworkEvaluator.deleteLeftChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
        }
        // else: it's blocked now and has no children, so it was blocked before too; do nothing
      } else if (childLeftTuple == null) {
        // not blocked, with no children, so it must have been blocked before, so assert
        ltm.add(leftTuple); // add to memory so other fact handles can attempt to match
        trgLeftTuples.addInsert(
            sink.createLeftTuple(leftTuple, sink, leftTuple.getPropagationContext(), true));
        // use the leftTuple for the pctx here, as the right one is not available;
        // this won't cause a problem, as the trigger tuple (to the left) will be more recent
        // anyway
      } else {
        // not blocked, with children, so it wasn't blocked before and still isn't, so modify
        updateChildLeftTuple(childLeftTuple, stagedLeftTuples, trgLeftTuples);
        ltm.add(leftTuple); // add to memory so other fact handles can attempt to match
        childLeftTuple.reAddLeft();
      }
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
  constraints.resetTuple(contextEntry);
}
private void doRiaNode2(
    InternalWorkingMemory wm, TupleSets<LeftTuple> srcTuples, RightInputAdapterNode riaNode) {
  ObjectSink[] sinks = riaNode.getSinkPropagator().getSinks();
  BetaNode betaNode = (BetaNode) sinks[0];
  BetaMemory bm;
  Memory nodeMem = wm.getNodeMemory(betaNode);
  if (NodeTypeEnums.AccumulateNode == betaNode.getType()) {
    bm = ((AccumulateMemory) nodeMem).getBetaMemory();
  } else {
    bm = (BetaMemory) nodeMem;
  }
  // Build up the iteration array for the other sinks
  BetaNode[] bns = null;
  BetaMemory[] bms = null;
  int length = sinks.length;
  if (length > 1) {
    bns = new BetaNode[sinks.length - 1];
    bms = new BetaMemory[sinks.length - 1];
    for (int i = 1; i < length; i++) {
      bns[i - 1] = (BetaNode) sinks[i];
      Memory nodeMem2 = wm.getNodeMemory(bns[i - 1]);
      // check the peer sink's own type when selecting its memory
      if (NodeTypeEnums.AccumulateNode == bns[i - 1].getType()) {
        bms[i - 1] = ((AccumulateMemory) nodeMem2).getBetaMemory();
      } else {
        bms[i - 1] = (BetaMemory) nodeMem2;
      }
    }
  }
  length--; // subtract one, as the first sink is not in the array
  for (LeftTuple leftTuple = srcTuples.getInsertFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    PropagationContext pctx = leftTuple.getPropagationContext();
    InternalFactHandle handle = riaNode.createFactHandle(leftTuple, pctx, wm);
    RightTuple rightTuple = new RightTupleImpl(handle, betaNode);
    leftTuple.setContextObject(handle);
    rightTuple.setPropagationContext(pctx);
    if (bm.getStagedRightTuples().isEmpty()) {
      bm.setNodeDirtyWithoutNotify();
    }
    bm.getStagedRightTuples().addInsert(rightTuple);
    if (bns != null) {
      // Add peered RightTuples; they are attached to the FH - unlink LeftTuples that have a
      // peer ref
      for (int i = 0; i < length; i++) {
        rightTuple = new RightTupleImpl(handle, bns[i]);
        rightTuple.setPropagationContext(pctx);
        if (bms[i].getStagedRightTuples().isEmpty()) {
          bms[i].setNodeDirtyWithoutNotify();
        }
        bms[i].getStagedRightTuples().addInsert(rightTuple);
      }
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
  for (LeftTuple leftTuple = srcTuples.getDeleteFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    InternalFactHandle handle = (InternalFactHandle) leftTuple.getContextObject();
    RightTuple rightTuple = handle.getFirstRightTuple();
    TupleSets<RightTuple> rightTuples = bm.getStagedRightTuples();
    if (rightTuples.isEmpty()) {
      bm.setNodeDirtyWithoutNotify();
    }
    rightTuples.addDelete(rightTuple);
    if (bns != null) {
      // Add peered RightTuples; they are attached to the FH - unlink LeftTuples that have a
      // peer ref
      for (int i = 0; i < length; i++) {
        rightTuple = rightTuple.getHandleNext();
        rightTuples = bms[i].getStagedRightTuples();
        if (rightTuples.isEmpty()) {
          bms[i].setNodeDirtyWithoutNotify();
        }
        rightTuples.addDelete(rightTuple);
      }
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
  for (LeftTuple leftTuple = srcTuples.getUpdateFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    InternalFactHandle handle = (InternalFactHandle) leftTuple.getContextObject();
    RightTuple rightTuple = handle.getFirstRightTuple();
    TupleSets<RightTuple> rightTuples = bm.getStagedRightTuples();
    if (rightTuples.isEmpty()) {
      bm.setNodeDirtyWithoutNotify();
    }
    rightTuples.addUpdate(rightTuple);
    if (bns != null) {
      // Add peered RightTuples; they are attached to the FH - unlink LeftTuples that have a
      // peer ref
      for (int i = 0; i < length; i++) {
        rightTuple = rightTuple.getHandleNext();
        rightTuples = bms[i].getStagedRightTuples();
        if (rightTuples.isEmpty()) {
          bms[i].setNodeDirtyWithoutNotify();
        }
        rightTuples.addUpdate(rightTuple);
      }
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
  srcTuples.resetAll();
}
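// Aside: a minimal sketch of the staging idea behind the addInsert/addUpdate/addDelete calls
// above: changes are queued into per-kind lists, and the owning memory is marked dirty only when
// the first entry is staged, mirroring the setNodeDirtyWithoutNotify guard. This is not the
// Drools TupleSets class; all names are illustrative.
import java.util.ArrayDeque;
import java.util.Deque;

final class StagingSketch {
  static final class StagedSets<T> {
    final Deque<T> inserts = new ArrayDeque<>();
    final Deque<T> updates = new ArrayDeque<>();
    final Deque<T> deletes = new ArrayDeque<>();
    boolean dirty;

    boolean isEmpty() {
      return inserts.isEmpty() && updates.isEmpty() && deletes.isEmpty();
    }

    void addInsert(T t) {
      markDirtyIfFirst();
      inserts.add(t);
    }

    void addUpdate(T t) {
      markDirtyIfFirst();
      updates.add(t);
    }

    void addDelete(T t) {
      markDirtyIfFirst();
      deletes.add(t);
    }

    private void markDirtyIfFirst() {
      if (isEmpty()) {
        dirty = true; // only the first staged entry flips the node to dirty
      }
    }
  }

  public static void main(String[] args) {
    StagedSets<String> sets = new StagedSets<>();
    sets.addInsert("rt1");
    sets.addInsert("rt2");
    System.out.println(sets.dirty + " " + sets.inserts.size()); // true 2
  }
}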
public void remove(RightTuple tuple) {
  tuple.getMemory().remove(tuple);
  size--;
}
public void doLeftUpdates(
    FromNode fromNode,
    FromMemory fm,
    LeftTupleSink sink,
    InternalWorkingMemory wm,
    TupleSets<LeftTuple> srcLeftTuples,
    TupleSets<LeftTuple> trgLeftTuples,
    TupleSets<LeftTuple> stagedLeftTuples) {
  BetaMemory bm = fm.getBetaMemory();
  ContextEntry[] context = bm.getContext();
  BetaConstraints betaConstraints = fromNode.getBetaConstraints();
  AlphaNodeFieldConstraint[] alphaConstraints = fromNode.getAlphaConstraints();
  DataProvider dataProvider = fromNode.getDataProvider();
  Class<?> resultClass = fromNode.getResultClass();
  for (LeftTuple leftTuple = srcLeftTuples.getUpdateFirst(); leftTuple != null; ) {
    LeftTuple next = leftTuple.getStagedNext();
    PropagationContext propagationContext = leftTuple.getPropagationContext();
    final Map<Object, RightTuple> previousMatches =
        (Map<Object, RightTuple>) leftTuple.getContextObject();
    final Map<Object, RightTuple> newMatches = new HashMap<Object, RightTuple>();
    leftTuple.setContextObject(newMatches);
    betaConstraints.updateFromTuple(context, wm, leftTuple);
    FastIterator rightIt = LinkedList.fastIterator;
    for (final java.util.Iterator<?> it =
            dataProvider.getResults(leftTuple, wm, propagationContext, fm.providerContext);
        it.hasNext(); ) {
      final Object object = it.next();
      if ((object == null) || !resultClass.isAssignableFrom(object.getClass())) {
        continue; // skip anything that is not assignable to the result class
      }
      RightTuple rightTuple = previousMatches.remove(object);
      if (rightTuple == null) {
        // new match, propagate assert
        rightTuple = fromNode.createRightTuple(leftTuple, propagationContext, wm, object);
      } else {
        // previous match, so reevaluate and propagate modify
        if (rightIt.next(rightTuple) != null) {
          // handle the odd case where more than one object has the same hashcode/equals value
          previousMatches.put(object, (RightTuple) rightIt.next(rightTuple));
          rightTuple.setNext(null);
        }
      }
      checkConstraintsAndPropagate(
          sink,
          leftTuple,
          rightTuple,
          alphaConstraints,
          betaConstraints,
          propagationContext,
          wm,
          fm,
          context,
          true,
          trgLeftTuples,
          null);
      fromNode.addToCreatedHandlesMap(newMatches, rightTuple);
    }
    for (RightTuple rightTuple : previousMatches.values()) {
      for (RightTuple current = rightTuple;
          current != null;
          current = (RightTuple) rightIt.next(current)) {
        deleteChildLeftTuple(
            propagationContext, trgLeftTuples, stagedLeftTuples, current.getFirstChild());
      }
    }
    leftTuple.clearStaged();
    leftTuple = next;
  }
  betaConstraints.resetTuple(context);
}
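// Aside: the match-diffing idea in the FROM node's doLeftUpdates above, with strings standing in
// for right tuples: results still produced move from previousMatches to newMatches (modify),
// unseen ones are asserted, and whatever remains in previousMatches is retracted. A minimal
// sketch, not the Drools implementation.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class FromDiffSketch {
  public static void main(String[] args) {
    Map<String, String> previousMatches = new HashMap<>();
    previousMatches.put("a", "rt-a");
    previousMatches.put("b", "rt-b");

    Map<String, String> newMatches = new HashMap<>();
    for (String result : List.of("a", "c")) { // fresh results from the data provider
      String rt = previousMatches.remove(result);
      if (rt == null) {
        rt = "rt-" + result; // new match: assert
      } // else: existing match carried over: modify
      newMatches.put(result, rt);
    }
    // leftovers were not re-produced, so their children would be retracted
    System.out.println("retract: " + previousMatches.keySet()); // [b]
    System.out.println("keep: " + newMatches.keySet()); // [a, c]
  }
}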
public void doRightInserts(
    NotNode notNode,
    BetaMemory bm,
    InternalWorkingMemory wm,
    RightTupleSets srcRightTuples,
    LeftTupleSets trgLeftTuples,
    LeftTupleSets stagedLeftTuples) {
  LeftTupleMemory ltm = bm.getLeftTupleMemory();
  RightTupleMemory rtm = bm.getRightTupleMemory();
  ContextEntry[] contextEntry = bm.getContext();
  BetaConstraints constraints = notNode.getRawConstraints();
  // this must be processed here, rather than on initial insert, as we need to link the blocker
  unlinkNotNodeOnRightInsert(notNode, bm, wm);
  for (RightTuple rightTuple = srcRightTuples.getInsertFirst(); rightTuple != null; ) {
    RightTuple next = rightTuple.getStagedNext();
    rtm.add(rightTuple);
    if (ltm == null || ltm.size() == 0) {
      // do nothing here, as there is no left memory
      rightTuple.clearStaged();
      rightTuple = next;
      continue;
    }
    FastIterator it = notNode.getLeftIterator(ltm);
    PropagationContext context = rightTuple.getPropagationContext();
    constraints.updateFromFactHandle(contextEntry, wm, rightTuple.getFactHandle());
    for (LeftTuple leftTuple = notNode.getFirstLeftTuple(rightTuple, ltm, context, it);
        leftTuple != null; ) {
      // preserve next now, in case we remove this leftTuple
      LeftTuple temp = (LeftTuple) it.next(leftTuple);
      if (leftTuple.getStagedType() == LeftTuple.UPDATE) {
        // ignore, as it will get processed via left iteration. Children cannot be processed twice
        leftTuple = temp;
        continue;
      }
      // we know that only unblocked LeftTuples are still in the memory
      if (constraints.isAllowedCachedRight(contextEntry, leftTuple)) {
        leftTuple.setBlocker(rightTuple);
        rightTuple.addBlocked(leftTuple);
        // this is now blocked so remove from memory
        ltm.remove(leftTuple);
        // subclasses like ForallNotNode might override this propagation
        // ** @TODO (mdp) need to not break forall
        LeftTuple childLeftTuple = leftTuple.getFirstChild();
        if (childLeftTuple != null) { // NotNode only has one child
          childLeftTuple.setPropagationContext(rightTuple.getPropagationContext());
          RuleNetworkEvaluator.deleteLeftChild(childLeftTuple, trgLeftTuples, stagedLeftTuples);
        }
      }
      leftTuple = temp;
    }
    rightTuple.clearStaged();
    rightTuple = next;
  }
  constraints.resetFactHandle(contextEntry);
}
@Test
public void testRetract() {
  final PropagationContext context =
      pctxFactory.createPropagationContext(0, PropagationContext.INSERTION, null, null, null);
  final StatefulKnowledgeSessionImpl workingMemory =
      new StatefulKnowledgeSessionImpl(
          1L, (InternalKnowledgeBase) KnowledgeBaseFactory.newKnowledgeBase());
  final ClassFieldReader extractor = store.getReader(Cheese.class, "type");
  final MvelConstraint constraint =
      new MvelConstraintTestUtil(
          "type == \"stilton\"",
          FieldFactory.getInstance().getFieldValue("stilton"),
          extractor);
  final List list = new ArrayList();
  final Cheese cheese1 = new Cheese("stilton", 5);
  final Cheese cheese2 = new Cheese("stilton", 15);
  list.add(cheese1);
  list.add(cheese2);
  final MockDataProvider dataProvider = new MockDataProvider(list);
  final Pattern pattern = new Pattern(0, new ClassObjectType(Cheese.class));
  From fromCe = new From(dataProvider);
  fromCe.setResultPattern(pattern);
  final ReteFromNode from =
      new ReteFromNode(
          3,
          dataProvider,
          new MockTupleSource(30),
          new AlphaNodeFieldConstraint[] {constraint},
          null,
          true,
          buildContext,
          fromCe);
  final MockLeftTupleSink sink = new MockLeftTupleSink(5);
  from.addTupleSink(sink);
  final List asserted = sink.getAsserted();
  final Person person1 = new Person("xxx2", 30);
  final FactHandle person1Handle = workingMemory.insert(person1);
  final LeftTuple tuple = new LeftTupleImpl((DefaultFactHandle) person1Handle, from, true);
  from.assertLeftTuple(tuple, context, workingMemory);
  assertEquals(2, asserted.size());
  final FromMemory memory = (FromMemory) workingMemory.getNodeMemory(from);
  assertEquals(1, memory.getBetaMemory().getLeftTupleMemory().size());
  assertNull(memory.getBetaMemory().getRightTupleMemory());
  RightTuple rightTuple2 = tuple.getFirstChild().getRightParent();
  RightTuple rightTuple1 = tuple.getFirstChild().getHandleNext().getRightParent();
  assertFalse(rightTuple1.equals(rightTuple2));
  assertNull(tuple.getFirstChild().getHandleNext().getHandleNext());
  final InternalFactHandle handle2 = rightTuple2.getFactHandle();
  final InternalFactHandle handle1 = rightTuple1.getFactHandle();
  assertEquals(handle1.getObject(), cheese2);
  assertEquals(handle2.getObject(), cheese1);
  from.retractLeftTuple(tuple, context, workingMemory);
  assertEquals(0, memory.getBetaMemory().getLeftTupleMemory().size());
  assertNull(memory.getBetaMemory().getRightTupleMemory());
}