git.treefish.org Git - shutbox.git/commitdiff
10 rods only
author    Alexander Schmidt <alex@treefish.org>
          Mon, 2 Nov 2020 22:29:50 +0000 (23:29 +0100)
committer Alexander Schmidt <alex@treefish.org>
          Mon, 2 Nov 2020 22:29:50 +0000 (23:29 +0100)
src/game.py
src/qtable.py
src/random-agent.py

src/game.py
index 5fc133a4280169935ae5b4e376e210f3d6610c07..203f56a9e31fb09ba9b4651418af1d8287ac7e28 100644 (file)
@@ -2,7 +2,7 @@ import random
 
 class Game:
     def __init__(self):
-        self._shutable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
+        self._shutable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
         self._diced = None
         self._options = []
         self._score = 0
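
The game.py change is a single line: the board drops rods 11 and 12, leaving the common 10-rod layout. Everything else in the commit follows from this, because the set of open rods is encoded as a bitmask in qtable.py (see the hunks below). A minimal equivalent construction of the reduced list:

    # Rods 1 through 10, equivalent to the literal list in the diff above.
    shutable = list(range(1, 11))
    assert shutable == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]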
src/qtable.py
index 5dda44424da840faab10be2ac05307bcfaf33780..0804187c15694b0774f4f93daff23672c3f14467 100755 (executable)
@@ -9,8 +9,8 @@ from game import Game
 learning_rate = 0.1
 discount_factor = 1.0
 
-states_dim = 147456 # 2^12 * 6^2
-actions_dim = 637 # (12+1) * (6+1)^2
+states_dim = 36864 # 2^10 * 6^2
+actions_dim = 539 # (10+1) * (6+1)^2
 num_episodes = 10000000000
 
 def find_state_qid(shutable, diced):
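
The new table dimensions follow directly from the 10-rod board: a state is any subset of the 10 rods (a 10-bit mask) combined with an ordered pair of dice, and the action index is bounded by (10+1) * (6+1)^2. A quick check of the arithmetic:

    # State space: every subset of 10 rods times every ordered dice pair.
    assert 2**10 * 6**2 == 36864          # states_dim

    # Action space: one factor of 10+1 and two factors of 6+1.
    assert (10 + 1) * (6 + 1)**2 == 539   # actions_dim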
@@ -18,13 +18,13 @@ def find_state_qid(shutable, diced):
     for rod in shutable:
         qid += pow(2, rod-1)
     for i in range(len(diced)):
-        qid += (diced[i]-1) * pow(6, i) * pow(2, 12)
+        qid += (diced[i]-1) * pow(6, i) * pow(2, 10)
     return qid
 
 def find_option_qid(option):
     qid = 0
     for i in range(len(option)):
-        qid += option[i] * pow(7, i) * pow(13, len(option)-1)
+        qid += option[i] * pow(7, i) * pow(11, len(option)-1)
     return qid
 
 def select_option(opts, qs):
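
For reference, find_state_qid packs the whole game state into one table row index: the open rods occupy the low 10 bits as a set-membership mask, and the two dice sit above bit 10 as base-6 digits. A self-contained worked example, with the function body matching the post-commit code above:

    def find_state_qid(shutable, diced):
        qid = 0
        for rod in shutable:            # low 10 bits: open-rod mask
            qid += pow(2, rod - 1)
        for i in range(len(diced)):     # dice as base-6 digits above bit 10
            qid += (diced[i] - 1) * pow(6, i) * pow(2, 10)
        return qid

    # Rods {1, 4} open, dice (3, 5):
    # mask = 2^0 + 2^3 = 9; dice part = (2 + 4*6) * 1024 = 26624.
    assert find_state_qid([1, 4], (3, 5)) == 9 + 26624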
@@ -34,7 +34,7 @@ def select_option(opts, qs):
         opt_qid = find_option_qid(opt)
         opt_qid_pairs.append( [opt, opt_qid] )
         opt_qsum += qs[opt_qid]
-    random.shuffle(opt_qid_pairs)
+    #random.shuffle(opt_qid_pairs)
     ran_pt = random.uniform(0.0, opt_qsum)
     decision_pt = 0.0
     for opt_qid_pair in opt_qid_pairs:
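
select_option draws an option with probability proportional to its current Q-value, i.e. roulette-wheel selection. A minimal standalone sketch of the same scheme, assuming non-negative weights (roulette_pick is an illustrative name, not part of this repo):

    import random

    def roulette_pick(options, weights):
        # Pick options[i] with probability weights[i] / sum(weights).
        total = sum(weights)
        point = random.uniform(0.0, total)
        cumulative = 0.0
        for opt, w in zip(options, weights):
            cumulative += w
            if cumulative >= point:
                return opt
        return options[-1]  # guard against floating-point round-off

Note that with random.shuffle disabled (the change in this hunk) and a freshly zeroed table, opt_qsum is 0.0 and the scan always returns the first option; the shuffle previously acted as a tie-breaker among zero-valued options.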
@@ -57,15 +57,14 @@ for i in range(num_episodes):
             old_score = g.get_score()
             g.shut(opt)
             g.dice()
-            reward = (g.get_score() - old_score) / 12.0
+            reward = (g.get_score() - old_score) / 11.0
             new_state_qid = find_state_qid(g.get_shutable(), g.get_diced())
             Q[state_qid, opt_qid] += \
                 learning_rate * (reward
                                  + discount_factor * np.max(Q[new_state_qid, :])
                                  - Q[state_qid, opt_qid])
             state_qid = new_state_qid
-        else:
-            Q[state_qid, opt_qid] = 0
+    Q[state_qid, opt_qid] = 0
     running_score[0] *= 0.99999999
     running_score[0] += g.get_score()
     running_score[1] *= 0.99999999
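
The assignment inside the episode loop is the standard tabular Q-learning update, Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)), with alpha = 0.1 and gamma = 1.0 from the constants above; the dedented Q[state_qid, opt_qid] = 0 now zeroes the final state-action pair once per episode rather than inside an else branch. A condensed sketch of the same step (q_update is an illustrative helper, not part of the repo):

    import numpy as np

    learning_rate = 0.1    # alpha
    discount_factor = 1.0  # gamma

    def q_update(Q, s, a, reward, s_new):
        # Move Q[s, a] toward the bootstrapped one-step target.
        target = reward + discount_factor * np.max(Q[s_new, :])
        Q[s, a] += learning_rate * (target - Q[s, a])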
src/random-agent.py
index d9354a163f3a50b714cb3ed3d8b592251d548384..36c84d5b4c6e11439224a686ae7726cbd317c9e1 100755 (executable)
@@ -15,9 +15,9 @@ def play_game():
 
 avg_score = 0.0
 
-for i in range(0, 10000):
+for i in range(0, 100000):
     avg_score += play_game()
 
-avg_score /= 10000
+avg_score /= 100000
 
 print(avg_score)
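
The random-agent baseline now averages over 100,000 games instead of 10,000; since the standard error of a mean scales with 1/sqrt(n), the estimate is roughly 3.2x tighter for 10x the runtime. The same computation in one idiomatic line, assuming the play_game defined at the top of this file:

    n_games = 100_000
    avg_score = sum(play_game() for _ in range(n_games)) / n_games
    print(avg_score)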