import random
import numpy as np

states_dim = 36864   # 2^10 * 6^2: ten tile bits times two dice values
actions_dim = 539    # (10+1) * (6+1)^2
num_episodes = 10000000000

learning_rate = 0.1      # assumed value for the step size used in the Q-update below
discount_factor = 1.0    # assumed value; no discounting within an episode
def find_state_qid(shutable, diced):
    # Encode the state as one index: one bit per still-open tile (2^10)
    # plus a block offset for the two current dice values (6^2).
    qid = 0
    for tile in shutable:          # shutable: tile numbers (1-10) still open
        qid += pow(2, tile - 1)
    for i in range(len(diced)):    # diced: the two current dice values (1-6)
        qid += (diced[i] - 1) * pow(6, i) * pow(2, 10)
    return qid
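# Illustrative check of the state encoder, assuming shutable is the list of
# still-open tile numbers: all ten tiles open with a roll of (3, 5) gives
# 1023 + (3-1)*1024 + (5-1)*6*1024 = 27647, and the largest reachable index
# is 36863 = states_dim - 1.
assert find_state_qid([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [3, 5]) == 27647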
def find_option_qid(option):
    # Map an option (as produced by g.get_options()) onto the 539-wide action axis.
    qid = 0
    for i in range(len(option)):
        qid += option[i] * pow(7, i) * pow(11, len(option) - 1)
    return qid
def select_option(opts, qs):
    # Roulette-wheel selection over the options' current Q-values.
    opt_qid_pairs = []
    opt_qsum = 0.0
    for opt in opts:
        opt_qid = find_option_qid(opt)
        opt_qid_pairs.append([opt, opt_qid])
        opt_qsum += qs[opt_qid]
    ran_pt = random.uniform(0.0, opt_qsum)
    decision_pt = 0.0
    for opt_qid_pair in opt_qid_pairs:
        decision_pt += qs[opt_qid_pair[1]]
        if ran_pt <= decision_pt:
            return (opt_qid_pair[0], opt_qid_pair[1])
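# Sampling in proportion to Q only behaves sensibly while the Q-values stay
# non-negative; together with the all-ones initialisation below, this takes
# the place of an explicit epsilon-greedy exploration schedule.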
Q = np.ones([states_dim, actions_dim])

running_score = [0.0, 0.0]   # decayed sum of scores and decayed count, for a running average
for i in range(num_episodes):
    g = Game()   # assumed constructor name for the game used throughout this listing
    state_qid = find_state_qid(g.get_shutable(), g.get_diced())
    while not g.is_over():
        options = g.get_options()
        opt, opt_qid = select_option(options, Q[state_qid, :])
        old_score = g.get_score()
        g.play(opt)   # assumed method name: apply the chosen option and roll the dice again
        reward = g.get_score() - old_score
        new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
        # One-step Q-learning update towards reward plus the discounted best
        # value of the successor state.
        Q[state_qid, opt_qid] += \
            learning_rate * (reward
                             + discount_factor * np.max(Q[new_state_qid, :])
                             - Q[state_qid, opt_qid])
        state_qid = new_state_qid
    # Exponentially decayed running average of the per-game score
    # (decay 0.99999999 gives an effective window of roughly 10^8 games).
    running_score[0] *= 0.99999999
    running_score[0] += g.get_score()
    running_score[1] *= 0.99999999
    running_score[1] += 1.0
    print("%d: %f" % (i, running_score[0] / running_score[1]))
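# A minimal evaluation sketch, assuming the same Game API as above: play one
# game greedily, always taking the option with the highest Q-value instead of
# sampling from them.
def play_greedy_game():
    g = Game()
    while not g.is_over():
        state_qid = find_state_qid(g.get_shutable(), g.get_diced())
        opt = max(g.get_options(), key=lambda o: Q[state_qid, find_option_qid(o)])
        g.play(opt)   # same assumed method name as in the training loop
    return g.get_score()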