/**
 * Performs precomputation before the actual state elimination. This handles cases in which all
 * or some states can or have to be treated differently, e.g. because there are no target states
 * at all, or because some states never reach a target state.
 *
 * @return true iff state elimination is necessary to obtain a result
 */
private boolean precompute() {
    /* If there are no target states, the reachability probability is zero
     * everywhere, so all states can be made absorbing. If we are instead
     * analysing accumulated rewards, the value is infinity everywhere. */
    if (!pmc.isUseTime() && !pmc.hasTargetStates()) {
        for (int state = 0; state < pmc.getNumStates(); state++) {
            pmc.makeAbsorbing(state);
            if (pmc.isUseRewards()) {
                pmc.setReward(state, pmc.getFunctionFactory().getInf());
            }
        }
        return false;
    }

    /* Search for states which might never reach a target state and thus
     * have to be assigned a reward of infinity. */
    if (pmc.isUseRewards()) {
        int[] backStatesArr = collectStatesBackward();
        HashSet<Integer> reaching = new HashSet<Integer>();
        for (int stateNr = 0; stateNr < backStatesArr.length; stateNr++) {
            reaching.add(backStatesArr[stateNr]);
        }
        for (int state = 0; state < pmc.getNumStates(); state++) {
            if (!pmc.isUseTime() && !reaching.contains(state)) {
                pmc.setReward(state, pmc.getFunctionFactory().getInf());
            }
        }
    }

    return true;
}
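/* A minimal sketch (not part of the original class) of the backward search
 * that collectStatesBackward() is assumed to perform: a BFS from the target
 * states along reversed edges. States the search never visits cannot reach
 * a target, which is why precompute() assigns them an infinite reward. The
 * method name and the predecessor-list representation are hypothetical;
 * assumes java.util.{Set,HashSet,List,Deque,ArrayDeque} imports. */
private static Set<Integer> statesReachingTarget(List<List<Integer>> incoming, Set<Integer> targets) {
    Set<Integer> reaching = new HashSet<Integer>(targets);
    Deque<Integer> todo = new ArrayDeque<Integer>(targets);
    while (!todo.isEmpty()) {
        int state = todo.poll();
        for (int pred : incoming.get(state)) {
            /* add() returns true iff pred has not been seen before */
            if (reaching.add(pred)) {
                todo.add(pred);
            }
        }
    }
    return reaching;
}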
/**
 * Obtains the result for a given state. Before calling this method, all states must have been
 * eliminated.
 *
 * @param state state to obtain the result for
 * @return result for the given state
 */
Function getResult(int state) {
    /* Due to state elimination, at this point each state either:
     * A) has only a self-loop, or
     * B) has no self-loop and only transitions to one or more states
     * of the form A. */
    if (pmc.isUseRewards() && !pmc.isUseTime()) {
        /* States which do not reach target states with probability one
         * have been assigned a reward of infinity. Target states have a
         * reward of zero and only self-loops. Because of this, and because
         * of the state elimination (see above), we can read the reward
         * directly from the corresponding reward structure. */
        return pmc.getReward(state);
    } else if (pmc.isUseRewards() && pmc.isUseTime()) {
        /* The long-run average reward for a state of the form A is just
         * reward(state) / time(state). For states of both forms A and B,
         * the long-run average is then the probability of moving to a
         * state of form A times the long-run average value of that state. */
        ListIterator<Integer> toStateIter = pmc.transitionTargets.get(state).listIterator();
        ListIterator<Function> toProbIter = pmc.transitionProbs.get(state).listIterator();
        Function result = pmc.getFunctionFactory().getZero();
        while (toStateIter.hasNext()) {
            int toState = toStateIter.next();
            Function toProb = toProbIter.next();
            result = result.add(toProb.multiply(pmc.getReward(toState)).divide(pmc.getTime(toState)));
        }
        return result;
    } else {
        /* A state of the form A is either a target state or a state which
         * cannot reach a target state at all. A state of the form B is
         * neither, and only has transitions to states of the form A.
         * Because of this, to obtain reachability probabilities we just
         * have to add up the one-step probabilities into target states. */
        ListIterator<Integer> toStateIter = pmc.transitionTargets.get(state).listIterator();
        ListIterator<Function> toProbIter = pmc.transitionProbs.get(state).listIterator();
        Function result = pmc.getFunctionFactory().getZero();
        while (toStateIter.hasNext()) {
            int toState = toStateIter.next();
            Function toProb = toProbIter.next();
            if (pmc.isTargetState(toState)) {
                result = result.add(toProb);
            }
        }
        return result;
    }
}
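/* Illustrative sketch (not part of the original class) of the read-off in
 * the reachability branch of getResult() above, on plain doubles instead
 * of Functions: once elimination is complete, the probability of reaching
 * a target is simply the sum of the remaining one-step probabilities into
 * target states. For transitions {(t1, 0.25), (t2, 0.5), (u, 0.25)} with
 * t1 and t2 targets, this yields 0.25 + 0.5 = 0.75. Assumes a
 * java.util.function.IntPredicate import. */
private static double readOffReachability(int[] toStates, double[] toProbs, IntPredicate isTarget) {
    double result = 0.0;
    for (int i = 0; i < toStates.length; i++) {
        if (isTarget.test(toStates[i])) {
            result += toProbs[i];
        }
    }
    return result;
}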
/**
 * Eliminates a given state.
 *
 * @param midState state to eliminate
 */
private void eliminate(int midState) {
    Function loopProb = pmc.getSelfLoopProb(midState);
    /* States with only a self-loop require no further treatment. */
    if (loopProb.equals(pmc.getFunctionFactory().getOne())) {
        return;
    }
    /* slStar = 1/(1-x), where x is the self-loop probability */
    Function slStar = loopProb.star();

    /* Adapt the reward and time for midState (scaling by slStar accounts
     * for its self-loops) and for its predecessors: each predecessor gains
     * the expected reward/time obtained from moving to midState, times the
     * probability of moving there. */
    if (pmc.isUseRewards()) {
        pmc.setReward(midState, pmc.getReward(midState).multiply(slStar));
        for (int from : pmc.incoming.get(midState)) {
            if (from != midState) {
                pmc.setReward(from,
                        pmc.getReward(from)
                                .add(pmc.getTransProb(from, midState).multiply(pmc.getReward(midState))));
            }
        }
    }
    if (pmc.isUseTime()) {
        pmc.setTime(midState, pmc.getTime(midState).multiply(slStar));
        for (int from : pmc.incoming.get(midState)) {
            if (from != midState) {
                pmc.setTime(from,
                        pmc.getTime(from)
                                .add(pmc.getTransProb(from, midState).multiply(pmc.getTime(midState))));
            }
        }
    }

    /* Redirect the transitions of the predecessors of midState. Redirection
     * is done such that a state fromState will move to a successor state
     * toState of midState with probability
     * (<fromState-to-midState-prob> * <midState-to-toState-prob>) / (1 - <self-loop-prob>).
     * (If there already was a transition from fromState to toState, the
     * probabilities are added up.) All transitions to midState are removed. */
    ArrayList<NewTransition> newTransitions = new ArrayList<NewTransition>();
    for (int fromState : pmc.incoming.get(midState)) {
        if (fromState != midState) {
            Function fromToMid = pmc.getTransProb(fromState, midState);
            ListIterator<Integer> toStateIter = pmc.transitionTargets.get(midState).listIterator();
            ListIterator<Function> toProbIter = pmc.transitionProbs.get(midState).listIterator();
            while (toStateIter.hasNext()) {
                int toState = toStateIter.next();
                Function midToTo = toProbIter.next();
                if (toState != midState) {
                    Function fromToToAdd = fromToMid.multiply(slStar.multiply(midToTo));
                    newTransitions.add(new NewTransition(fromState, toState, fromToToAdd));
                }
            }
        }
    }
    for (int fromState : pmc.incoming.get(midState)) {
        ListIterator<Integer> toStateIter = pmc.transitionTargets.get(fromState).listIterator();
        ListIterator<Function> toProbIter = pmc.transitionProbs.get(fromState).listIterator();
        while (toStateIter.hasNext()) {
            int state = toStateIter.next();
            toProbIter.next();
            if (state == midState) {
                toStateIter.remove();
                toProbIter.remove();
                break;
            }
        }
    }
    for (NewTransition newTransition : newTransitions) {
        pmc.addTransition(newTransition.fromState, newTransition.toState, newTransition.prob);
    }

    /* Remove the self-loop from midState and set each outgoing probability
     * to <out-prob> / (1 - <self-loop-prob>). This corresponds to the
     * probability of eventually leaving midState to a specific successor
     * state, after executing any number of self-loops. */
    ListIterator<Integer> toStateIter = pmc.transitionTargets.get(midState).listIterator();
    ListIterator<Function> toProbIter = pmc.transitionProbs.get(midState).listIterator();
    while (toStateIter.hasNext()) {
        int toState = toStateIter.next();
        Function toProb = toProbIter.next();
        if (midState != toState) {
            toProbIter.set(slStar.multiply(toProb));
        }
    }
    toStateIter = pmc.transitionTargets.get(midState).listIterator();
    toProbIter = pmc.transitionProbs.get(midState).listIterator();
    while (toStateIter.hasNext()) {
        int toState = toStateIter.next();
        toProbIter.next();
        if (midState == toState) {
            toStateIter.remove();
            toProbIter.remove();
            break;
        }
    }
    pmc.incoming.get(midState).clear();
}
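/* Numeric sanity check (not part of the original class) of the elimination
 * identity applied by eliminate() above, on plain doubles: rewiring a
 * predecessor a around an eliminated state b gives
 *     p'(a,c) = p(a,b) * p(b,c) / (1 - p(b,b)),
 * i.e. the probability of reaching c from a via any number of b-self-loops.
 * With p(a,b) = 0.5, p(b,b) = 0.2 and p(b,c) = 0.4, this yields
 * 0.5 * 1.25 * 0.4 = 0.25. */
private static double eliminatedProb(double aToB, double bLoop, double bToC) {
    /* star of the self-loop: the geometric series 1 + x + x^2 + ... = 1/(1-x) */
    double slStar = 1.0 / (1.0 - bLoop);
    return aToB * slStar * bToC;
}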