states_dim = 147456   # 2^12 * 6^2
actions_dim = 637     # (12+1) * (6+1)^2
num_episodes = 10000000000   # ten billion episodes
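
# The two helpers below flatten a game position (which tiles are still open
# plus the current dice) and a chosen option into a row / column index of the
# Q table.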
def find_state_qid(shutable, diced):
    # 12-bit mask of the open tiles (numbered 1..12), with the two dice values
    # stacked on top in base 6.
    qid = 0
    for tile in shutable:
        qid += pow(2, tile - 1)
    for i in range(len(diced)):
        qid += (diced[i] - 1) * pow(6, i) * pow(2, 12)
    return qid

def find_option_qid(option):
    # Mixed-radix index, assuming option = [x, a, b] with x in 0..12 and
    # a, b in 0..6, so the result stays below actions_dim = (12+1) * (6+1)^2.
    qid = option[0]
    for i in range(1, len(option)):
        qid += option[i] * pow(7, i - 1) * 13
    return qid

def select_option(opts, qs):
    # Roulette-wheel choice: options are drawn in proportion to their Q values.
    opt_qid_pairs = []
    opt_qsum = 0.0
    for opt in opts:
        opt_qid = find_option_qid(opt)
        opt_qid_pairs.append([opt, opt_qid])
        opt_qsum += qs[opt_qid]
    random.shuffle(opt_qid_pairs)
    ran_pt = random.uniform(0.0, opt_qsum)
    decision_pt = 0.0
    for opt_qid_pair in opt_qid_pairs:
        decision_pt += qs[opt_qid_pair[1]]
        if ran_pt <= decision_pt:
            return (opt_qid_pair[0], opt_qid_pair[1])
    # Guard against float round-off leaving ran_pt just above the running sum.
    return (opt_qid_pairs[-1][0], opt_qid_pairs[-1][1])
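
# One row per encoded state, one column per encoded option; with every entry
# starting at 1.0 the proportional selection above is initially uniform.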
Q = np.ones([states_dim, actions_dim])

running_score = [0.0, 0.0]  # decayed sum of final scores, decayed episode count

for i in range(num_episodes):
    # a fresh game `g` is set up and the dice rolled here (not shown in this excerpt)
    state_qid = find_state_qid(g.get_shutable(), g.get_diced())
    while not g.is_over():
        opt, opt_qid = select_option(g.get_options(), Q[state_qid, :])
        old_score = g.get_score()
        # the chosen option is applied to `g` here (not shown in this excerpt)
        reward = (g.get_score() - old_score) / 12.0  # score gained this move, scaled by 1/12
        new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
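        # Tabular Q-learning update:
        #   Q(s, a) += learning_rate * (reward + discount_factor * max_a' Q(s', a') - Q(s, a))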
        Q[state_qid, opt_qid] += \
            learning_rate * (reward
                             + discount_factor * np.max(Q[new_state_qid, :])
                             - Q[state_qid, opt_qid])
        state_qid = new_state_qid
    # episode over: zero the entry for the terminal state and the last chosen option
    Q[state_qid, opt_qid] = 0
    running_score[0] *= 0.99999999   # exponentially decayed sum of final scores
    running_score[0] += g.get_score()
    running_score[1] *= 0.99999999   # matching decayed episode count
    running_score[1] += 1.0
    print("%d: %f" % (i, running_score[0] / running_score[1]))
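
# Quick sanity check of the encodings, assuming tiles are numbered 1..12 and
# options are laid out as [0..12, 0..6, 0..6] as above: the extreme inputs
# should map to the last row and last column of the Q table.
assert find_state_qid(list(range(1, 13)), [6, 6]) == states_dim - 1
assert find_option_qid([12, 6, 6]) == actions_dim - 1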