/*
   * Performs one Q-learning sweep over the given list of transition
   * experiences.
   *
   * For each transition t, the previous state and the chosen action come
   * from t, and the resulting state also comes from t. The standard Q update,
   *
   *   Q(s,a) <- Q(s,a) + alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a)),
   *
   * is applied to fill in the corresponding entry of Q_table (the output).
   */
  public void Qsweep(List<Transition> transitions) {
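    // on the first sweep, set up the Q-table before applying any updates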
    if (firstTimeQ) initTable(transitions);
    for (int index = 0; index < transitions.size(); index++) {
      Transition t = transitions.get(index);

      // determine the reward (if any) for this transition
      float reward = -0.08f; // small penalty for each timestep used
      if (t.getNextState().getEnemyHealth() < 0.001) {
        reward = 5;
      } else if (t.getNextState().getOgreHealth() < 0.001) {
        reward = -10;
      } else if (t.getNextState().getOgreEnergy() < t.getCurrentState().getOgreEnergy()) {
        // we just got him with the bow
        reward = 5;
      }
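      // Q-learning update: move this entry toward the bootstrapped target
      // reward + gamma * queryBestState(t), where queryBestState is assumed
      // to return the best Q-value reachable from t's next state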
      Q_table[index][0] += alpha * (reward + gamma * queryBestState(t) - Q_table[index][0]);

      // clamp the updated Q-value to the range [-10, 10]
      if (Q_table[index][0] > 10) Q_table[index][0] = 10;
      if (Q_table[index][0] < -10) Q_table[index][0] = -10;
    }
    firstTimeQ = false;
  }
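
  /*
   * Illustrative sketch only: one way this sweep might be driven from a
   * training loop. The experience buffer, numEpisodes, and runEpisode()
   * below are hypothetical names, not part of this class.
   *
   *   List<Transition> experiences = new ArrayList<>();
   *   for (int episode = 0; episode < numEpisodes; episode++) {
   *     experiences.addAll(runEpisode()); // gather new transitions
   *     Qsweep(experiences);              // replay the whole buffer through the Q update
   *   }
   */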