Code example #1
0
  /**
   * Performs precomputation before actual state elimination. This handles cases in which all or
   * some states can or have to be treated differently, e.g. because there are no target states at
   * all, or some states do never reach a target state.
   *
   * @return true iff state elimination is necessary to obtain a result
   */
  private boolean precompute() {
    /* if there are no target states, the result is zero everywhere
     * for a reachability probability analysis, so all states can be
     * made absorbing. If we are performing analysis of accumulated
     * rewards, the value will be infinity everywhere. */
    if (!pmc.isUseTime() && !pmc.hasTargetStates()) {
      for (int state = 0; state < pmc.getNumStates(); state++) {
        pmc.makeAbsorbing(state);
        if (pmc.isUseRewards()) {
          pmc.setReward(state, pmc.getFunctionFactory().getInf());
        }
      }
      /* result already known for every state - no elimination needed */
      return false;
    }

    /* search for states which might never reach a target state and thus
     * have to be assigned a reward of infinity. */
    if (pmc.isUseRewards()) {
      /* states from which a target state is reachable, found by backward search */
      HashSet<Integer> reaching = new HashSet<Integer>();
      for (int backState : collectStatesBackward()) {
        reaching.add(backState);
      }
      /* loop-invariant: infinity is only assigned for reward (not time) analyses */
      boolean assignInf = !pmc.isUseTime();
      for (int state = 0; state < pmc.getNumStates(); state++) {
        if (assignInf && !reaching.contains(state)) {
          pmc.setReward(state, pmc.getFunctionFactory().getInf());
        }
      }
    }
    return true;
  }
Code example #2
0
  /**
   * Eliminates a given state.
   *
   * <p>After this method returns, no state has a transition into {@code midState} any more; the
   * probability mass that previously flowed through {@code midState} is redirected straight to its
   * successors, and - where applicable - expected rewards and times are propagated to its
   * predecessors. The order of the steps below matters: rewards/times must be propagated before
   * the transitions are redirected, and incoming transitions are removed before the bypass
   * transitions are inserted.
   *
   * @param midState state to eliminate
   */
  private void eliminate(int midState) {
    Function loopProb = pmc.getSelfLoopProb(midState);
    /* states with only a self-loop require no further treatment */
    if (loopProb.equals(pmc.getFunctionFactory().getOne())) {
      return;
    }
    /* slStar = 1/(1-x), where x is the self-loop probability */
    Function slStar = loopProb.star();

    /* adapt rewards and time spent in state accordingly. The new
     * values correspond to adding the expected reward/time obtained
     * from moving to the midState from one of its predecessors, times
     * the probability of moving. Note that midState's own value is
     * rescaled by slStar first, so the predecessors below already see
     * the self-loop-corrected value. */
    if (pmc.isUseRewards()) {
      pmc.setReward(midState, pmc.getReward(midState).multiply(slStar));
      for (int from : pmc.incoming.get(midState)) {
        if (from != midState) {
          pmc.setReward(
              from,
              pmc.getReward(from)
                  .add(pmc.getTransProb(from, midState).multiply(pmc.getReward(midState))));
        }
      }
    }
    if (pmc.isUseTime()) {
      pmc.setTime(midState, pmc.getTime(midState).multiply(slStar));
      for (int from : pmc.incoming.get(midState)) {
        if (from != midState) {
          pmc.setTime(
              from,
              pmc.getTime(from)
                  .add(pmc.getTransProb(from, midState).multiply(pmc.getTime(midState))));
        }
      }
    }

    /* redirect transitions of predecessors of midState. Redirection is
     * done such that some state fromState will have a probability of
     * moving to a successor state toState of midState with probability
     * (<fromState-to-midState-prob> * <midState-to-toState-prob>)
     * / (1-<self-loop-prob>). (If there already was a transition from fromState
     * to toState, probabilities will be added up.). All transitions to
     * midState will be removed. */
    /* phase 1: compute the bypass transitions without mutating the lists yet */
    ArrayList<NewTransition> newTransitions = new ArrayList<NewTransition>();
    for (int fromState : pmc.incoming.get(midState)) {
      if (fromState != midState) {
        Function fromToMid = pmc.getTransProb(fromState, midState);
        ListIterator<Integer> toStateIter = pmc.transitionTargets.get(midState).listIterator();
        ListIterator<Function> toProbIter = pmc.transitionProbs.get(midState).listIterator();
        while (toStateIter.hasNext()) {
          int toState = toStateIter.next();
          Function midToTo = toProbIter.next();
          if (toState != midState) {
            Function fromToToAdd = fromToMid.multiply(slStar.multiply(midToTo));
            newTransitions.add(new NewTransition(fromState, toState, fromToToAdd));
          }
        }
      }
    }
    /* phase 2: drop each predecessor's old transition into midState. The break after
     * the first match relies on at most one stored transition per state pair -
     * NOTE(review): confirm this invariant holds in the pmc representation. */
    for (int fromState : pmc.incoming.get(midState)) {
      ListIterator<Integer> toStateIter = pmc.transitionTargets.get(fromState).listIterator();
      ListIterator<Function> toProbIter = pmc.transitionProbs.get(fromState).listIterator();
      while (toStateIter.hasNext()) {
        int state = toStateIter.next();
        toProbIter.next();
        if (state == midState) {
          toStateIter.remove();
          toProbIter.remove();
          break;
        }
      }
    }
    /* phase 3: insert the precomputed bypass transitions */
    for (NewTransition newTransition : newTransitions) {
      pmc.addTransition(newTransition.fromState, newTransition.toState, newTransition.prob);
    }

    /* remove self loop from state and set outgoing probabilities to
     * <out-prob> / (1-<self-loop-prob>). This corresponds to the
     * probability of eventually leaving midState to a specific successor
     * state, after executing any number of self loops. */
    ListIterator<Integer> toStateIter = pmc.transitionTargets.get(midState).listIterator();
    ListIterator<Function> toProbIter = pmc.transitionProbs.get(midState).listIterator();
    while (toStateIter.hasNext()) {
      int toState = toStateIter.next();
      Function toProb = toProbIter.next();
      if (midState != toState) {
        toProbIter.set(slStar.multiply(toProb));
      }
    }
    /* drop midState's own self-loop entry, if still present */
    toStateIter = pmc.transitionTargets.get(midState).listIterator();
    toProbIter = pmc.transitionProbs.get(midState).listIterator();
    while (toStateIter.hasNext()) {
      int toState = toStateIter.next();
      toProbIter.next();
      if (midState == toState) {
        toStateIter.remove();
        toProbIter.remove();
        break;
      }
    }
    /* midState has no predecessors any more */
    pmc.incoming.get(midState).clear();
  }