forked from ShangtongZhang/DeepRL
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathreplay.py
More file actions
50 lines (40 loc) · 1.86 KB
/
replay.py
File metadata and controls
50 lines (40 loc) · 1.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
class Replay:
    """Fixed-size circular experience-replay buffer for off-policy RL.

    Stores (state, action, reward, next_state, done) transitions and
    returns uniformly sampled mini-batches. The state buffers are
    allocated lazily on the first feed() call, so the buffer adapts to
    whatever state shape the environment produces.
    """

    def __init__(self, memory_size, batch_size, dtype=np.float32):
        """Create an empty buffer.

        memory_size: maximum number of transitions held; older entries
                     are overwritten once the buffer wraps around.
        batch_size:  number of transitions returned by sample().
        dtype:       numpy dtype used for the state arrays.
        """
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.dtype = dtype
        # State buffers are allocated on the first feed(), once the
        # state shape is known.
        self.states = None
        self.next_states = None
        # NOTE(review): int8 assumes a discrete action space with
        # fewer than 128 actions — larger action ids would silently
        # wrap. Kept as-is to preserve the original memory layout.
        self.actions = np.empty(self.memory_size, dtype=np.int8)
        self.rewards = np.empty(self.memory_size)
        self.terminals = np.empty(self.memory_size, dtype=np.int8)
        self.pos = 0        # next write index
        self.full = False   # True once the buffer has wrapped at least once

    def feed(self, experience):
        """Insert one transition, overwriting the oldest entry when full.

        experience: tuple (state, action, reward, next_state, done),
                    where state/next_state are numpy arrays of the same
                    shape.
        """
        state, action, reward, next_state, done = experience
        if self.states is None:
            # Lazy allocation: the state shape is only known now.
            shape = (self.memory_size,) + state.shape
            self.states = np.empty(shape, dtype=self.dtype)
            self.next_states = np.empty(shape, dtype=self.dtype)
        self.states[self.pos] = state
        self.actions[self.pos] = action
        self.rewards[self.pos] = reward
        self.next_states[self.pos] = next_state
        self.terminals[self.pos] = done
        self.pos += 1
        if self.pos == self.memory_size:
            self.full = True
            self.pos = 0

    def sample(self):
        """Return a uniform random mini-batch (with replacement).

        Returns a list [states, actions, rewards, next_states,
        terminals], each an array indexed by the same batch_size
        random positions.

        Raises ValueError if the buffer is empty (previously this
        surfaced as an obscure 'low >= high' error from
        np.random.randint).
        """
        upper_bound = self.memory_size if self.full else self.pos
        if upper_bound == 0:
            raise ValueError('cannot sample from an empty replay buffer')
        sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)
        return [self.states[sampled_indices],
                self.actions[sampled_indices],
                self.rewards[sampled_indices],
                self.next_states[sampled_indices],
                self.terminals[sampled_indices]]