/**
 * Performs one Q-learning sweep over a batch of transition experiences.
 *
 * For each transition t, the standard Q-update rule is applied: the old
 * state and the action come from t, the new state comes from t, and the
 * updated value is written back into Q_table (the output).
 */
public void Qsweep(List<Transition> transitions) {
    if (firstTimeQ)
        initTable(transitions);

    for (int index = 0; index < transitions.size(); index++) {
        Transition t = transitions.get(index);

        // Reward shaping: a small per-timestep penalty by default, large
        // terminal rewards for killing the enemy or being killed, and a
        // bonus when the ogre's energy dropped (i.e. it just fired the bow).
        float reward = -0.08f; // small penalty for each timestep used
        if (t.getNextState().getEnemyHealth() < 0.001) {
            reward = 5; // enemy killed
        } else if (t.getNextState().getOgreHealth() < 0.001) {
            reward = -10; // ogre killed
        } else if (t.getNextState().getOgreEnergy() < t.getCurrentState().getOgreEnergy()) {
            reward = 5; // energy dropped: we just got him with the bow
        }

        // Standard Q-update: Q <- Q + alpha * (r + gamma * max_a' Q(s', a') - Q)
        Q_table[index][0] += alpha * (reward + gamma * queryBestState(t) - Q_table[index][0]);

        // Clamp Q-values to [-10, 10] so the table stays bounded.
        if (Q_table[index][0] > 10)
            Q_table[index][0] = 10;
        if (Q_table[index][0] < -10)
            Q_table[index][0] = -10;
    }
    firstTimeQ = false;
}
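/*
 * A minimal driver sketch, not part of the original class: repeatedly
 * calling Qsweep over the same fixed batch of experience behaves like
 * fitted Q-iteration, propagating reward information one step further
 * back on each pass. The method name trainOnBatch and the numSweeps
 * parameter are assumptions introduced here for illustration.
 */
public void trainOnBatch(List<Transition> batch, int numSweeps) {
    for (int sweep = 0; sweep < numSweeps; sweep++) {
        Qsweep(batch); // each sweep reuses the same stored transitions
    }
}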
/**
 * Returns max_a Q(s', a) for the next state of the given transition,
 * i.e. the value of the better of the two available actions.
 */
public double queryBestState(Transition t) {
    double energy = t.getNextState().getOgreEnergy();
    double distance = t.getNextState().getEnemyDistance();
    double vAttack = FQ_learner.nearestValue(energy, distance, 0); // action 0: attack
    double vEvade = FQ_learner.nearestValue(energy, distance, 1);  // action 1: evade
    return Math.max(vAttack, vEvade);
}
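/*
 * A sketch of the same max-over-actions lookup generalized to an
 * arbitrary action count: the original hard-codes two actions
 * (0 = attack, 1 = evade). queryBestStateGeneral and numActions are
 * hypothetical names introduced here, not part of the original code.
 */
private double queryBestStateGeneral(Transition t, int numActions) {
    double energy = t.getNextState().getOgreEnergy();
    double distance = t.getNextState().getEnemyDistance();
    double best = Double.NEGATIVE_INFINITY;
    for (int action = 0; action < numActions; action++) {
        // nearestValue(energy, distance, action) approximates Q(s', a)
        best = Math.max(best, FQ_learner.nearestValue(energy, distance, action));
    }
    return best;
}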
/** Debug helper: prints every field of a transition on one line (no trailing newline). */
private void printTrans(Transition t) {
    System.out.print("trans:"
            + t.getCurrentState().getOgreHealth() + " "
            + t.getCurrentState().getOgreEnergy() + " "
            + t.getCurrentState().getEnemyHealth() + " "
            + t.getCurrentState().getEnemyDistance() + " "
            + t.getAction() + " "
            + t.getNextState().getOgreHealth() + " "
            + t.getNextState().getOgreEnergy() + " "
            + t.getNextState().getEnemyHealth() + " "
            + t.getNextState().getEnemyDistance());
}
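/*
 * Usage sketch for printTrans, assuming it is called from this class:
 * since printTrans uses print() without a newline, a batch dump has to
 * add one per transition. printBatch is a hypothetical helper added
 * here for illustration.
 */
private void printBatch(List<Transition> transitions) {
    for (Transition t : transitions) {
        printTrans(t);
        System.out.println(); // terminate the single-line dump
    }
}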