X-Git-Url: http://git.treefish.org/~alex/shutbox.git/blobdiff_plain/f3518c9f8f23a3df8eea91b3d4f23b8685123392..a283dadadd02c6c2fa68feb2a58afa1cd6fe530f:/src/qtable.py?ds=sidebyside

diff --git a/src/qtable.py b/src/qtable.py
index 0804187..fb65008 100755
--- a/src/qtable.py
+++ b/src/qtable.py
@@ -6,7 +6,7 @@ import sys
 
 from game import Game
 
-learning_rate = 0.1
+learning_rate = 0.001
 discount_factor = 1.0
 
 states_dim = 36864 # 2^10 * 6^2
@@ -34,7 +34,6 @@ def select_option(opts, qs):
         opt_qid = find_option_qid(opt)
         opt_qid_pairs.append( [opt, opt_qid] )
         opt_qsum += qs[opt_qid]
-    #random.shuffle(opt_qid_pairs)
     ran_pt = random.uniform(0.0, opt_qsum)
     decision_pt = 0.0
     for opt_qid_pair in opt_qid_pairs:
@@ -57,14 +56,14 @@ for i in range(num_episodes):
         old_score = g.get_score()
         g.shut(opt)
         g.dice()
-        reward = (g.get_score() - old_score) / 11.0
+        reward = g.get_score() - old_score
         new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
         Q[state_qid, opt_qid] += \
             learning_rate * (reward + discount_factor * np.max(Q[new_state_qid, :]) - Q[state_qid, opt_qid])
         state_qid = new_state_qid
 
-    Q[state_qid, opt_qid] = 0
+    Q[state_qid, :] = 0.0
 
     running_score[0] *= 0.99999999
     running_score[0] += g.get_score()
     running_score[1] *= 0.99999999