/**
 * Obtain result for a given state. Before calling this method, all states
 * must have been eliminated.
 *
 * @param state state to obtain result for
 * @return result for given state
 */
Function getResult(int state)
{
	/* due to state elimination, at this point each state either:
	 * A) has only a self-loop, or
	 * B) has no self-loop and only transitions to one or more states
	 * of the form A. */
	if (pmc.isUseRewards() && !pmc.isUseTime()) {
		/* states which do not reach target states with probability one
		 * are assigned a reward of infinity. Target states have a reward
		 * of zero and only self-loops. Because of this, and because of
		 * the state elimination (see above), we can read the reward
		 * directly from the corresponding reward structure. */
		return pmc.getReward(state);
	} else if (pmc.isUseRewards() && pmc.isUseTime()) {
		/* due to state elimination, each state either:
		 * A) has a self-loop, or
		 * B) does not have a self-loop and only transitions to states
		 * of the form A. The long-run average reward for a state of the
		 * form A is just reward(state) / time(state). For states of
		 * either form, the long-run average is the sum, over all
		 * successor states of form A, of the probability of moving to
		 * that state times its long-run average value. */
		ListIterator<Integer> toStateIter = pmc.transitionTargets.get(state).listIterator();
		ListIterator<Function> toProbIter = pmc.transitionProbs.get(state).listIterator();
		Function result = pmc.getFunctionFactory().getZero();
		while (toStateIter.hasNext()) {
			int toState = toStateIter.next();
			Function toProb = toProbIter.next();
			result = result.add(toProb.multiply(pmc.getReward(toState)).divide(pmc.getTime(toState)));
		}
		return result;
	} else {
		/* due to state elimination, each state either:
		 * A) has a self-loop, and then is a target state or cannot reach
		 * a target state at all, or
		 * B) is neither a target state nor a state which cannot reach
		 * target states, and then has no self-loop and only transitions
		 * to states of the form A. Because of this, to obtain
		 * reachability probabilities we just have to add up the one-step
		 * probabilities to target states. */
		ListIterator<Integer> toStateIter = pmc.transitionTargets.get(state).listIterator();
		ListIterator<Function> toProbIter = pmc.transitionProbs.get(state).listIterator();
		Function result = pmc.getFunctionFactory().getZero();
		while (toStateIter.hasNext()) {
			int toState = toStateIter.next();
			Function toProb = toProbIter.next();
			if (pmc.isTargetState(toState)) {
				result = result.add(toProb);
			}
		}
		return result;
	}
}
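/*
 * Worked example (illustrative, not part of the original code): in the
 * reachability case above, suppose the eliminated state s has exactly two
 * transitions, s -> t1 with probability function p and s -> t2 with
 * probability 1-p, where t1 is a target state and t2 is not. Then
 * getResult(s) yields the function p. The helper below is a hypothetical
 * sketch of how results for all states could be collected once
 * elimination has finished; it assumes only members already used in this
 * class (pmc.getNumStates() and getResult()).
 */
Function[] getAllResults()
{
	Function[] results = new Function[pmc.getNumStates()];
	for (int state = 0; state < pmc.getNumStates(); state++) {
		// each entry is a rational function over the model parameters
		results[state] = getResult(state);
	}
	return results;
}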
/**
 * Orders states so that states near target states are eliminated first.
 * States which cannot reach target states are eliminated last. In case
 * there are no target states, the order is arbitrary.
 *
 * @return list of states in the requested order
 */
private int[] collectStatesBackward()
{
	int[] states = new int[pmc.getNumStates()];
	BitSet seen = new BitSet(pmc.getNumStates());
	HashSet<Integer> current = new HashSet<Integer>();
	int nextStateNr = 0;
	/* start the backward search from the target states */
	for (int state = 0; state < pmc.getNumStates(); state++) {
		if (pmc.isTargetState(state)) {
			current.add(state);
			states[nextStateNr] = state;
			seen.set(state, true);
			nextStateNr++;
		}
	}
	/* breadth-first search along incoming transitions, so that the
	 * resulting order grows outward from the target states */
	while (!current.isEmpty()) {
		HashSet<Integer> next = new HashSet<Integer>();
		for (int state : current) {
			for (int predState : pmc.incoming.get(state)) {
				if (!seen.get(predState)) {
					seen.set(predState, true);
					next.add(predState);
					states[nextStateNr] = predState;
					nextStateNr++;
				}
			}
		}
		current = next;
	}
	/* the backward search does not find states which cannot reach the
	 * target states, so append the missing ones at the end */
	for (int state = 0; state < pmc.getNumStates(); state++) {
		if (!seen.get(state)) {
			states[nextStateNr] = state;
			nextStateNr++;
		}
	}
	return states;
}
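/*
 * Illustrative sketch (not part of the original code): for a chain
 * 0 -> 1 -> 2 in which only state 2 is a target, the backward
 * breadth-first search above returns the order [2, 1, 0], so states
 * closest to the target are eliminated first. The hypothetical helper
 * below checks the invariant that collectStatesBackward() returns a
 * permutation of all state indices.
 */
private boolean isValidEliminationOrder(int[] order)
{
	BitSet covered = new BitSet(pmc.getNumStates());
	for (int state : order) {
		// every state index must be in range and occur exactly once
		if (state < 0 || state >= pmc.getNumStates() || covered.get(state)) {
			return false;
		}
		covered.set(state, true);
	}
	return covered.cardinality() == pmc.getNumStates();
}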