Example #1
  // Executes the given action by sampling a successor state from the
  // action's transition decision diagrams (DDs).
  public void executeAction(Action a) {

    // For each state variable, retrieve its transition DD, evaluate it
    // w.r.t. the current state, and sample the variable's next value.
    ArrayList new_state = new ArrayList();
    for (int c = 0; c < (_nVars << 1); c++) {
      new_state.add("-");
    }
    for (int c = 0; c < (_nVars << 1); c++) {
      Object cur_assign = _state.get(c);
      if (cur_assign instanceof Boolean) {
        // Unprimed (current-state) ids occupy the upper half of _state;
        // the matching primed (next-state) id is offset by -_nVars.
        int nonprime_id = c + 1;
        int prime_id = nonprime_id - _nVars;
        Object DD = a._tmID2DD.get(Integer.valueOf(prime_id));
        // Evaluating the DD with the primed variable set to TRUE yields
        // P(var' = true | current state).
        _state.set(prime_id - 1, TRUE);
        double p_true = _mdp._context.evaluate(DD, _state);
        new_state.set(c, (_r.nextFloat() < p_true) ? TRUE : FALSE);
      }
    }

    _state = new_state;
  }
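At its core, executeAction is a Bernoulli draw against the probability each transition DD returns. A minimal self-contained sketch of that sampling idiom, with a hypothetical probTrue map standing in for the DD evaluation (the real code queries _mdp._context.evaluate instead):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Map;
  import java.util.Random;

  class TransitionSamplingSketch {
    // probTrue.get(i) stands in for evaluating variable i's transition DD,
    // i.e. P(var_i' = true | current state).
    static List<Boolean> sampleNextState(List<Boolean> state,
                                         Map<Integer, Double> probTrue,
                                         Random r) {
      List<Boolean> next = new ArrayList<>();
      for (int i = 0; i < state.size(); i++) {
        next.add(r.nextDouble() < probTrue.get(i)); // Bernoulli draw
      }
      return next;
    }
  }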
Example #2
 public void drawOrbits(ArrayList alObjectsArchive) {
   // Trace each object's orbit by connecting its archived positions with
   // line segments, one color per object. alObjectsArchive holds one list
   // of objects per timestep (e.g. from timeline.getObjectStateArchive()).
   ArrayList alPrevPos = new ArrayList();
   ArrayList alColors = new ArrayList();
   alColors.add(color(255, 0, 0));
   alColors.add(color(255, 255, 0));
   alColors.add(color(255, 0, 255));
   for (int i = 0; i < alObjectsArchive.size(); i++) {
     ArrayList objects = (ArrayList) alObjectsArchive.get(i);
     for (int j = 0; j < objects.size(); j++) {
       CelestialObject obj = (CelestialObject) objects.get(j);
       PVector pos = obj.getPosition();
       // Cycle through the palette if there are more objects than colors.
       stroke((Integer) alColors.get(j % alColors.size()));
       if (alPrevPos.size() == objects.size()) {
         // Connect the previous archived position to the current one.
         PVector prevPos = (PVector) alPrevPos.get(j);
         line(prevPos.x, prevPos.y, pos.x, pos.y);
         alPrevPos.set(j, pos);
       } else {
         alPrevPos.add(pos); // first timestep: nothing to connect yet
       }
     }
   }
 }
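A sketch of how drawOrbits might be driven from a Processing draw() loop, assuming a timeline object with a getObjectStateArchive() method (named after the comment above; neither is defined in this snippet):

 // Hypothetical driver for drawOrbits(); 'timeline' and
 // getObjectStateArchive() are assumptions, not shown in this code.
 void draw() {
   background(0);                                // clear the frame
   drawOrbits(timeline.getObjectStateArchive()); // redraw all orbit trails
 }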
Example #3
  // Derives Q-functions for the given value function and simulates the
  // greedy policy for the given number of trials and steps per trial.
  // Returns the final (discounted accumulated) value of every trial.
  public ArrayList simulate(int trials, int steps, long rand_seed) {
    ArrayList values = new ArrayList();
    _r = new Random(rand_seed);

    for (int trial = 1; trial <= trials; trial++) {

      System.out.println("\n -----------\n   Trial " + trial + "\n -----------");

      // Initialize state: one slot per primed and unprimed variable,
      // with "-" marking unassigned slots.
      _state = new ArrayList();
      _nVars = _mdp._alVars.size();
      for (int c = 0; c < (_nVars << 1); c++) {
        _state.add("-");
      }
      Iterator i = _mdp._alVars.iterator();
      _vars = new TreeSet();
      while (i.hasNext()) {
        String s = (String) i.next();
        if (!s.endsWith("'")) {
          Integer gid = (Integer) _mdp._tmVar2ID.get(s);
          _vars.add(gid);

          // Assign a random initial value at this variable's level
          // (level is gid - 1 because gids are assigned in order).
          _state.set(gid.intValue() - 1, _r.nextBoolean() ? TRUE : FALSE);
        }
      }
      double reward = _mdp._context.evaluate(_mdp._rewardDD, _state);
      System.out.print(" " + PrintState(_state) + "  " + MDP._df.format(reward));

      // Run steps
      for (int step = 1; step <= steps; step++) {

        // Get action
        Action a;
        if (_bUseBasis) {
          a = getBasisAction();
        } else {
          a = getAction();
        }

        // Execute action
        executeAction(a);

        // Update accumulated discounted reward
        reward =
            (_mdp._bdDiscount.doubleValue() * reward)
                + _mdp._context.evaluate(_mdp._rewardDD, _state);

        System.out.println(", a=" + a._sName);
        System.out.print(
            " " + PrintState(_state) + "  " + MDP._df.format(reward) + ": " + "Step " + step);
      }
      values.add(Double.valueOf(reward));
      System.out.println();
    }

    return values;
  }
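A minimal sketch of how simulate might be invoked, assuming a hypothetical Simulator wrapper already wired to a solved MDP (the class name and setup are assumptions, not part of the code above):

  // Hypothetical driver; 'sim' is assumed to be an instance of the class
  // containing simulate(), constructed elsewhere with a solved MDP.
  public static void runExperiment(Simulator sim) {
    int trials = 30;
    int steps = 100;
    long seed = 12345L; // fixed seed for reproducibility
    ArrayList values = sim.simulate(trials, steps, seed);
    double sum = 0d;
    for (Object v : values) {
      sum += ((Double) v).doubleValue();
    }
    System.out.println("Average discounted return: " + (sum / trials));
  }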
Example #4
  // Fill in weights for specific counting-aggregator approaches
  //
  // Basis function 1: count of computers running
  // Basis function 2: count of computers running and connected to
  //                   one other running computer
  //
  // Assuming the reboot action succeeds, compute the next-state value for
  // each candidate reboot target and choose the best action.
  public Action getBasisAction() {

    int best_reboot = -1;
    double best_reboot_val = Double.NEGATIVE_INFINITY;
    for (int c = 1; c <= _nVars; c++) {
      // Pretend computer c was rebooted successfully (its unprimed slot
      // sits at index c - 1 + _nVars) and score the resulting state.
      ArrayList test_state = (ArrayList) _state.clone();
      test_state.set(c - 1 + _nVars, TRUE);
      double test_val = evalBasisState(test_state, W_BASIS_1, W_BASIS_2);
      if (test_val > best_reboot_val) {
        best_reboot_val = test_val;
        best_reboot = c;
      }
    }

    return (Action) _mdp._hmName2Action.get("reboot" + best_reboot);
  }
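evalBasisState itself is not shown here. A plausible sketch, assuming the two weights simply scale the two counts described in the comment above (the connectivity test is a hypothetical helper, not part of the original code):

  // Hypothetical sketch of evalBasisState: a weighted sum of the two
  // counting basis functions. isConnectedToRunning() is an assumed helper.
  public double evalBasisState(ArrayList state, double w1, double w2) {
    int running = 0;
    int runningAndConnected = 0;
    for (int c = 1; c <= _nVars; c++) {
      if (TRUE.equals(state.get(c - 1 + _nVars))) {
        running++;
        if (isConnectedToRunning(state, c)) {
          runningAndConnected++;
        }
      }
    }
    return w1 * running + w2 * runningAndConnected;
  }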
Example #5
 // Replaces each stateful object with the corresponding entry from alState.
 // Assumes alState is index-aligned with alStatefulObjects and at least as long.
 public void setCurrentState(ArrayList alState) {
   for (int i = 0; i < alStatefulObjects.size(); i++) {
     alStatefulObjects.set(i, alState.get(i));
   }
 }