import random

import numpy as np

from game import Game

# Q-learning hyperparameters
-learning_rate = 0.1
+learning_rate = 0.001
discount_factor = 1.0
states_dim = 36864  # 2^10 * 6^2: ten shutable tiles times two six-sided dice
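# find_state_qid and find_option_qid are not shown in this excerpt; the
# sketch below is one plausible encoding, assumed here so the code runs:
# the state id packs the bitmask of still-open tiles with the two dice
# values, and the option id is the bitmask of the tiles an option shuts.
# actions_dim follows from that assumed encoding.
actions_dim = 1024  # 2^10: an action is a subset of the ten tiles (assumed)

def find_state_qid(shutable, diced):
    mask = sum(1 << (tile - 1) for tile in shutable)  # 10-bit open-tile mask
    d1, d2 = diced
    return mask * 36 + (d1 - 1) * 6 + (d2 - 1)        # fits within 2^10 * 6^2

def find_option_qid(opt):
    return sum(1 << (tile - 1) for tile in opt)       # option as a tile mask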
# Roulette-wheel selection: pick an option with probability proportional
# to its current Q value (qs is the Q row of the current state).
def select_option(options, qs):
    opt_qid_pairs = []
    opt_qsum = 0.0
    for opt in options:
        opt_qid = find_option_qid(opt)
        opt_qid_pairs.append([opt, opt_qid])
        opt_qsum += qs[opt_qid]
-    #random.shuffle(opt_qid_pairs)
    # Draw a point in [0, opt_qsum] and walk the cumulative weights; with a
    # non-empty options list and non-negative weights the walk always
    # returns, at the latest on the last pair.
    ran_pt = random.uniform(0.0, opt_qsum)
    decision_pt = 0.0
    for opt_qid_pair in opt_qid_pairs:
        decision_pt += qs[opt_qid_pair[1]]
        if ran_pt <= decision_pt:
            return (opt_qid_pair[0], opt_qid_pair[1])
-    return (None, None)
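# A quick sanity check of the wheel (hypothetical numbers): give two fake
# options weights 3.0 and 1.0; the first should win about 75% of the time.
# [10] and [4, 6] stand in for options shutting tile 10 versus tiles 4 and 6.
demo_qs = np.zeros(actions_dim)
demo_qs[find_option_qid([10])] = 3.0
demo_qs[find_option_qid([4, 6])] = 1.0
demo_picks = [select_option([[10], [4, 6]], demo_qs)[0] for _ in range(1000)]
assert 650 < demo_picks.count([10]) < 850  # ~750 expected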
# Start every Q value at 1 (optimistic) so unseen options keep a nonzero
# weight on the roulette wheel and continue to be explored.
Q = np.ones([states_dim, actions_dim])
g = Game()  # assumed setup for one game; the outer episode loop is not shown
g.dice()    # roll the dice for the first turn
state_qid = find_state_qid(g.get_shutable(), g.get_diced())
while not g.is_over():
    # Only select when legal moves exist; the old `if opt:` guard broke once
    # select_option stopped returning (None, None). This assumes g.is_over()
    # reports the game as over whenever no option remains.
-    opt, opt_qid = select_option( g.get_options(), Q[state_qid, :] )
-    if opt:
+    options = g.get_options()
+    if len(options) > 0:
+        opt, opt_qid = select_option( options, Q[state_qid, :] )
        old_score = g.get_score()
        g.shut(opt)  # apply the chosen option
        g.dice()     # roll for the next turn
-        reward = (g.get_score() - old_score) / 11.0
+        reward = g.get_score() - old_score
        new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
        # Tabular Q-learning update:
        # Q(s, a) += lr * (r + gamma * max_a' Q(s', a') - Q(s, a))
        Q[state_qid, opt_qid] += \
            learning_rate * (reward
                             + discount_factor * np.max(Q[new_state_qid, :])
                             - Q[state_qid, opt_qid])
        state_qid = new_state_qid
# After the game ends, zero the terminal state's whole row (not just one
# entry) so that max_a' Q(terminal, a') contributes 0 to the target above.
-Q[state_qid, opt_qid] = 0
+Q[state_qid, :] = 0.0
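# Worked example of the update with illustrative numbers: if Q[s, a] = 1.0,
# shutting the chosen tiles raised the score by 7, and the best entry of the
# next state's row is 1.0, then with learning_rate = 0.001 and
# discount_factor = 1.0:
#   Q[s, a]  ->  1.0 + 0.001 * (7 + 1.0 - 1.0) = 1.007
# so an option's roulette weight drifts slowly toward its observed return.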
# Exponentially decayed running sums, used to track the average score per
# game over training.
running_score[0] *= 0.99999999
running_score[0] += g.get_score()
running_score[1] *= 0.99999999
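# The excerpt ends here. By symmetry with running_score[0], the matching
# count update presumably follows, along the lines of:
#   running_score[1] += 1.0
#   average_score = running_score[0] / running_score[1]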