self_play.py (forked from werner-duvaud/muzero-general)

import math
import time

import numpy
import ray
import torch

import models
from mcts import MCTS, Node, GameHistory, MinMaxStats
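

# SelfPlay is a Ray actor: each instance owns its own Game environment and a
# local copy of the MuZero network, so many workers can generate games in
# parallel.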
@ray.remote
class SelfPlay:
    """
    Class which runs in a dedicated thread to play games and save them to the replay buffer.
    """
    def __init__(self, initial_checkpoint, Game, config, seed):
        self.config = config
        self.game = Game(seed)

        # Fix random generator seed
        numpy.random.seed(seed)
        torch.manual_seed(seed)

        # Initialize the network
        self.model = models.MuZeroNetwork(self.config)
        self.model.set_weights(initial_checkpoint["weights"])
        self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
        self.model.eval()
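
    # Main worker loop: refresh the weights from shared storage, play one
    # game, and ship it to the replay buffer (or report evaluation statistics
    # in test mode) until training finishes or a terminate signal arrives.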
    def continuous_self_play(self, shared_storage, replay_buffer, test_mode=False):
        while ray.get(
            shared_storage.get_info.remote("training_step")
        ) < self.config.training_steps and not ray.get(
            shared_storage.get_info.remote("terminate")
        ):
            self.model.set_weights(ray.get(shared_storage.get_info.remote("weights")))

            if not test_mode:
                game_history = self.play_game(
                    self.config.visit_softmax_temperature_fn(
                        trained_steps=ray.get(
                            shared_storage.get_info.remote("training_step")
                        )
                    ),
                    self.config.temperature_threshold,
                    False,
                    "self",
                    0,
                )

                replay_buffer.save_game.remote(game_history, shared_storage)
            else:
                # Take the best action (no exploration) in test mode
                game_history = self.play_game(
                    0,
                    self.config.temperature_threshold,
                    False,
                    "self" if len(self.config.players) == 1 else self.config.opponent,
                    self.config.muzero_player,
                )

                # Save to the shared storage
                shared_storage.set_info.remote(
                    {
                        "episode_length": len(game_history.action_history) - 1,
                        "total_reward": sum(game_history.reward_history),
                        "mean_value": numpy.mean(
                            [value for value in game_history.root_values if value]
                        ),
                    }
                )
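
                # Attribute each reward to the player who made the move that
                # produced it: reward_history[i] follows the action of the
                # player recorded at to_play_history[i - 1].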
                if 1 < len(self.config.players):
                    shared_storage.set_info.remote(
                        {
                            "muzero_reward": sum(
                                reward
                                for i, reward in enumerate(game_history.reward_history)
                                if game_history.to_play_history[i - 1]
                                == self.config.muzero_player
                            ),
                            "opponent_reward": sum(
                                reward
                                for i, reward in enumerate(game_history.reward_history)
                                if game_history.to_play_history[i - 1]
                                != self.config.muzero_player
                            ),
                        }
                    )

            # Managing the self-play / training ratio
            if not test_mode and self.config.self_play_delay:
                time.sleep(self.config.self_play_delay)

            if not test_mode and self.config.ratio:
                while (
                    ray.get(shared_storage.get_info.remote("training_step"))
                    / max(
                        1, ray.get(shared_storage.get_info.remote("num_played_steps"))
                    )
                    < self.config.ratio
                    and ray.get(shared_storage.get_info.remote("training_step"))
                    < self.config.training_steps
                    and not ray.get(shared_storage.get_info.remote("terminate"))
                ):
                    time.sleep(0.5)

        self.close_game()
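
    # A minimal launch sketch (the SharedStorage and ReplayBuffer actors and
    # the checkpoint dict are assumed to be set up as in muzero-general):
    #
    #   self_play_worker = SelfPlay.remote(checkpoint, Game, config, seed=0)
    #   self_play_worker.continuous_self_play.remote(shared_storage, replay_buffer)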
    def play_game(
        self, temperature, temperature_threshold, render, opponent, muzero_player
    ):
        """
        Play one game with actions based on the Monte Carlo tree search at each move.
        """
        game_history = GameHistory()
        observation = self.game.reset()
        game_history.action_history.append(0)
        game_history.observation_history.append(observation)
        game_history.reward_history.append(0)
        game_history.to_play_history.append(self.game.to_play())

        done = False

        if render:
            self.game.render()
        with torch.no_grad():
            while (
                not done and len(game_history.action_history) <= self.config.max_moves
            ):
                assert (
                    len(numpy.array(observation).shape) == 3
                ), f"Observation should be 3 dimensional instead of {len(numpy.array(observation).shape)} dimensional. Got observation of shape: {numpy.array(observation).shape}"
                assert (
                    numpy.array(observation).shape == self.config.observation_shape
                ), f"Observation should match the observation_shape defined in MuZeroConfig. Expected {self.config.observation_shape} but got {numpy.array(observation).shape}."
                stacked_observations = game_history.get_stacked_observations(
                    -1,
                    self.config.stacked_observations,
                )
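
                # MuZero acts when it is the player to move (or in pure
                # self-play). The temperature applies until
                # temperature_threshold moves have been played (for the whole
                # game if the threshold is unset), after which selection is
                # greedy (temperature 0).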
                # Choose the action
                if opponent == "self" or muzero_player == self.game.to_play():
                    root, mcts_info = MCTS(self.config).run(
                        self.model,
                        stacked_observations,
                        self.game.legal_actions(),
                        self.game.to_play(),
                        True,
                    )
                    action = self.select_action(
                        root,
                        temperature
                        if not temperature_threshold
                        or len(game_history.action_history) < temperature_threshold
                        else 0,
                    )

                    if render:
                        print(f'Tree depth: {mcts_info["max_tree_depth"]}')
                        print(
                            f"Root value for player {self.game.to_play()}: {root.value():.2f}"
                        )
                else:
                    action, root = self.select_opponent_action(
                        opponent, stacked_observations
                    )

                observation, reward, done = self.game.step(action)

                if render:
                    print(f"Played action: {self.game.action_to_string(action)}")
                    self.game.render()

                game_history.store_search_statistics(root, self.config.action_space)

                # Next batch
                game_history.action_history.append(action)
                game_history.observation_history.append(observation)
                game_history.reward_history.append(reward)
                game_history.to_play_history.append(self.game.to_play())

        return game_history
    def close_game(self):
        self.game.close()
    def select_opponent_action(self, opponent, stacked_observations):
        """
        Select opponent action for evaluating MuZero level.
        """
        if opponent == "human":
            root, mcts_info = MCTS(self.config).run(
                self.model,
                stacked_observations,
                self.game.legal_actions(),
                self.game.to_play(),
                True,
            )
            print(f'Tree depth: {mcts_info["max_tree_depth"]}')
            print(f"Root value for player {self.game.to_play()}: {root.value():.2f}")
            print(
                f"Player {self.game.to_play()}'s turn. MuZero suggests {self.game.action_to_string(self.select_action(root, 0))}"
            )
            return self.game.human_to_action(), root
        elif opponent == "expert":
            return self.game.expert_agent(), None
        elif opponent == "random":
            assert (
                self.game.legal_actions()
            ), f"Legal actions should not be an empty array. Got {self.game.legal_actions()}."
            assert set(self.game.legal_actions()).issubset(
                set(self.config.action_space)
            ), "Legal actions should be a subset of the action space."

            return numpy.random.choice(self.game.legal_actions()), None
        else:
            raise NotImplementedError(
                'Wrong argument: "opponent" argument should be "self", "human", "expert" or "random"'
            )
    @staticmethod
    def select_action(node, temperature):
        """
        Select action according to the visit count distribution and the temperature.
        The temperature is changed dynamically with the visit_softmax_temperature function
        in the config.
        """
        visit_counts = numpy.array(
            [child.visit_count for child in node.children.values()], dtype="int32"
        )
        actions = list(node.children.keys())
        if temperature == 0:
            action = actions[numpy.argmax(visit_counts)]
        elif temperature == float("inf"):
            action = numpy.random.choice(actions)
        else:
            # See paper appendix Data Generation
            visit_count_distribution = visit_counts ** (1 / temperature)
            visit_count_distribution = visit_count_distribution / sum(
                visit_count_distribution
            )
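            # e.g. visit counts [10, 40, 50] with temperature 1 give sampling
            # probabilities [0.1, 0.4, 0.5]; with temperature 0.5 the counts
            # are squared first: [100, 1600, 2500] -> [0.024, 0.381, 0.595].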
            action = numpy.random.choice(actions, p=visit_count_distribution)

        return action