-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplay.py
More file actions
40 lines (33 loc) · 1.15 KB
/
play.py
File metadata and controls
40 lines (33 loc) · 1.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import gym
from agent import Agent
from environments.gym import GymEnvironment
# from environments.flappy_bird import FlappyBird
# from environments.coin_collector import CoinCollector
if __name__ == '__main__':
    # Pick the environment to evaluate on; swap the comments to change it.
    # env = GymEnvironment(gym.make('CartPole-v1'))
    env = GymEnvironment(gym.make('LunarLander-v2'))
    # env = FlappyBird()
    # env = CoinCollector()

    # Evaluation-only agent: epsilon=0 means it always exploits the learned
    # policy (epsilon_decay / epsilon_min are unused at epsilon=0 but the
    # Agent constructor requires them).
    agent = Agent(
        gamma=0.99,
        epsilon=0,
        alpha=0.0005,
        input_dims=env.len_of_state(),
        num_of_actions=env.num_of_actions(),
        mem_size=1000000,
        batch_size=64,
        epsilon_decay=0.999,
        epsilon_min=0.01
    )

    # Load pretrained weights from ./models/<name>.h5.
    agent.load_model("./models/" + input("What file should the AI load in a brain from a file? ") + '.h5')

    # FIX: the original used input(...)[0], which raises IndexError when the
    # user just presses Enter. startswith('y') on the stripped, lower-cased
    # reply is safe for empty input (empty string -> False -> exit loop).
    while input('Would you like to watch the AI play another game (Y/N): ').strip().lower().startswith('y'):
        done = False
        score = 0
        observation = env.reset()
        # Play one full episode, rendering each frame.
        while not done:
            action = agent.choose_action(observation)
            observation, reward, done, info = env.step(action)
            score += reward
            env.render()
        # Episode finished: close the render window and report the total reward.
        env.close()
        print("Score was", score)