import random

import numpy as np

states_dim = 147456    # 2^12 * 6^2: 12 tile flags times two six-sided dice
actions_dim = 637      # (12+1) * (6+1)^2: one base-13 digit, two base-7 digits
num_episodes = 10000000
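
# The magic numbers above, restated as executable sanity checks:
assert states_dim == pow(2, 12) * pow(6, 2)
assert actions_dim == (12 + 1) * pow(6 + 1, 2)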

def find_state_qid(shutable, diced):
    # Pack the 12 open/shut tile flags into the low bits, then stack the
    # two dice values (1-6, stored zero-based) above them in base 6.
    qid = 0
    for i in range(len(shutable)):
        qid += shutable[i] * pow(2, i)
    for i in range(len(diced)):
        qid += (diced[i] - 1) * pow(6, i) * pow(2, 12)
    return qid
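# Example: tiles 1 and 3 open (bits 0 and 2 set) with a roll of (2, 5)
# maps to 5 + (2-1)*4096 + (5-1)*6*4096 = 102405.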

def find_option_qid(option):
    # Mixed-radix encoding matching actions_dim = (12+1) * (6+1)^2: the
    # first entry ranges over 0..12, every later entry over 0..6.
    qid = option[0]
    for i in range(1, len(option)):
        qid += option[i] * pow(7, i - 1) * 13
    return qid
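# Example: option = [9, 4, 5] maps to 9 + 4*13 + 5*7*13 = 516, safely
# inside actions_dim = 637.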

def select_option(opts, qs):
    # Roulette-wheel selection: each legal option is chosen with
    # probability proportional to its current Q-value.
    opt_qid_pairs = []
    opt_qsum = 0.0
    for opt in opts:
        opt_qid = find_option_qid(opt)
        opt_qid_pairs.append([opt, opt_qid])
        opt_qsum += qs[opt_qid]
    # Shuffling breaks ties: when every Q-value is still zero the loop
    # below returns its first pair, which is now uniformly random.
    random.shuffle(opt_qid_pairs)
    ran_pt = random.uniform(0.0, opt_qsum)
    decision_pt = 0.0
    for opt_qid_pair in opt_qid_pairs:
        decision_pt += qs[opt_qid_pair[1]]
        if ran_pt <= decision_pt:
            return (opt_qid_pair[0], opt_qid_pair[1])
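# Note: this is fitness-proportionate exploration rather than the more
# common epsilon-greedy scheme; it only behaves sensibly while Q-values
# stay non-negative, which holds here as long as rewards (score gains) do.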

Q = np.zeros([states_dim, actions_dim])
lr = 0.1       # learning rate (assumed value; tune as needed)
gamma = 0.9    # discount factor (assumed value; tune as needed)

# Exponentially decaying (sum, weight) pair for the average episode score.
running_score = [0.0, 0.0]
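# The table holds 147456 * 637, roughly 9.4e7 entries -- about 750 MB as
# float64. Switching to np.float32 would halve that.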

for i in range(num_episodes):
    # `Game` is a hypothetical constructor for one round of Shut the Box;
    # the object must expose the accessor methods used below.
    g = Game()
    state_qid = find_state_qid(g.get_shutable(), g.get_diced())
    while not g.is_over():
        opt, opt_qid = select_option(g.get_options(), Q[state_qid, :])
        old_score = g.get_score()
        g.apply_option(opt)  # hypothetical method: shut the chosen tiles, re-roll
        reward = g.get_score() - old_score
        new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
        # One-step Q-learning update: move Q(s, a) toward the observed
        # reward plus the discounted best value of the successor state.
        Q[state_qid, opt_qid] = Q[state_qid, opt_qid] + \
            lr * (reward + gamma * np.max(Q[new_state_qid, :]) - Q[state_qid, opt_qid])
        state_qid = new_state_qid
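        # Terminal states are never selected from, so their Q rows stay
        # zero and the final transition correctly bootstraps from 0.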
    # Fold this episode's score into the decaying average and report it.
    running_score[0] = running_score[0] * 0.999 + g.get_score()
    running_score[1] = running_score[1] * 0.999 + 1.0
    print("%d: %f" % (i, running_score[0] / running_score[1]))
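# With a 0.999 decay the printed score is an exponentially weighted
# average over an effective window of about 1/(1 - 0.999) = 1000 episodes.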