states_dim = 36864            # 2^10 tile patterns * 6^2 dice combinations
actions_dim = 539             # (10+1) * (6+1)^2 option encodings
num_episodes = 10_000_000_000
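
# Each state maps to a single row index: the low 10 bits encode which tiles
# are still open, and the two dice values select one of the 36 blocks above.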
def find_state_qid(shutable, diced):
    qid = 0
    # assumed representation: `shutable` lists the numbers of the open tiles
    for tile in shutable:
        qid += pow(2, tile - 1)
    for i in range(len(diced)):
        qid += (diced[i] - 1) * pow(6, i) * pow(2, 10)
    return qid
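
# An option is flattened the same way, as a mixed-radix number: the first
# component can take 11 values, the remaining two 7 values each.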
def find_option_qid(option):
    qid = 0
    base = 1
    for i in range(len(option)):
        qid += option[i] * base
        base *= 11 if i == 0 else 7
    return qid
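
# Exploration: sample an option with probability proportional to its current
# Q-value rather than always taking the arg-max.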
def select_option(opts, qs):
    opt_qid_pairs = []
    opt_qsum = 0.0
    for opt in opts:
        opt_qid = find_option_qid(opt)
        opt_qid_pairs.append([opt, opt_qid])
        opt_qsum += qs[opt_qid]
    ran_pt = random.uniform(0.0, opt_qsum)
    decision_pt = 0.0
    for opt_qid_pair in opt_qid_pairs:
        decision_pt += qs[opt_qid_pair[1]]
        if ran_pt <= decision_pt:
            return (opt_qid_pair[0], opt_qid_pair[1])
    # guard against floating-point rounding pushing ran_pt past the last bin
    return (opt_qid_pairs[-1][0], opt_qid_pairs[-1][1])
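
# One Q-value per (state, option) pair; initialising to 1.0 makes every legal
# option equally likely under the proportional sampling above.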
Q = np.ones([states_dim, actions_dim])

learning_rate = 0.1           # assumed value
discount_factor = 0.9         # assumed value
running_score = [0.0, 0.0]
stats = [0.0, 0.0]            # assumed: accumulated score and episode count
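
# Training loop: the learning player and a uniformly random opponent take
# alternating turns; a coin flip decides who moves first each episode.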
for i in range(num_episodes):
    g = Game()                # assumed name for the game class used below
    state_qid = find_state_qid(g.get_shutable(), g.get_diced())
    num_turn = random.randint(0, 1)
    while not g.is_over():
        options = g.get_options()
        if num_turn % 2 == 0:
            # learning player's turn
            opt, opt_qid = select_option(options, Q[state_qid, :])
            g.shut(opt)
            reward = 0.0      # placeholder: the reward computation is not shown
            new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
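            # Q-learning update: nudge the current estimate towards the
            # observed reward plus the discounted value of the best option
            # available in the successor state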
            Q[state_qid, opt_qid] += \
                learning_rate * (reward
                                 + discount_factor * np.max(Q[new_state_qid, :])
                                 - Q[state_qid, opt_qid])
            state_qid = new_state_qid
        else:
            # opponent's turn: play a uniformly random legal option
            choice = random.randint(0, len(options) - 1)
            g.shut(options[choice])
            state_qid = find_state_qid(g.get_shutable(), g.get_diced())
        num_turn += 1
    # per-episode scoring that updates running_score and stats is omitted here
    print(stats[0] / stats[1])