# optimizers.py
"""Some standard gradient-based stochastic optimizers.
These are just standard routines that don't make any use of autograd,
though you could take gradients of these functions too if you want
to do meta-optimization."""
from __future__ import absolute_import
import autograd.numpy as np
from builtins import range


def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
    """Stochastic gradient descent with momentum.
    grad() must have signature grad(x, i), where i is the iteration number."""
    velocity = np.zeros(x.shape)
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        velocity = mass * velocity - (1.0 - mass) * g  # exponentially weighted momentum
        x = x + step_size * velocity  # functional update, so the caller's x is not mutated in place
    return x
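

# A minimal usage sketch for sgd, assuming autograd is installed. The
# objective, the _demo_sgd name, and the grad wrapper are illustrative
# choices, not part of this module's API; the only contract sgd imposes
# is that grad accepts (x, i).
def _demo_sgd():
    from autograd import grad

    def objective(x):
        return np.sum((x - 3.0)**2)  # smooth convex bowl, minimized at x = 3

    grad_objective = grad(objective)
    grad_fn = lambda x, i: grad_objective(x)  # the iteration index i is unused here
    x0 = np.zeros(4)
    return sgd(grad_fn, x0)  # ends up close to np.full(4, 3.0)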


def rmsprop(grad, x, callback=None, num_iters=100,
            step_size=0.1, gamma=0.9, eps=10**-8):
    """Root mean squared prop: see Tieleman & Hinton's RMSprop
    (Coursera Neural Networks lecture 6.5, 2012) for details."""
    avg_sq_grad = np.ones(x.shape)  # match x's shape, consistent with sgd and adam
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)  # running average of squared gradients
        x = x - step_size * g / (np.sqrt(avg_sq_grad) + eps)    # per-parameter adaptive step
    return x
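

# A usage sketch for rmsprop where the iteration index i actually matters:
# it selects a data point, so grad(x, i) is a genuinely stochastic gradient.
# The toy least-squares data and the _demo_rmsprop name are assumptions
# made for illustration.
def _demo_rmsprop():
    from autograd import grad

    data = np.array([1.0, 2.0, 3.0, 4.0])  # toy observations with mean 2.5

    def single_point_loss(x, i):
        return (x[0] - data[i % len(data)])**2  # loss on one observation

    grad_fn = grad(single_point_loss)  # differentiates w.r.t. the first argument, x
    x0 = np.zeros(1)
    return rmsprop(grad_fn, x0, num_iters=400)  # hovers near the data mean, 2.5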


def adam(grad, x, callback=None, num_iters=100,
         step_size=0.001, b1=0.9, b2=0.999, eps=10**-8):
    """Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
    It's basically RMSprop with momentum and some correction terms."""
    m = np.zeros(x.shape)
    v = np.zeros(x.shape)
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        m = (1 - b1) * g + b1 * m       # First moment estimate.
        v = (1 - b2) * (g**2) + b2 * v  # Second moment estimate.
        mhat = m / (1 - b1**(i + 1))    # Bias correction.
        vhat = v / (1 - b2**(i + 1))
        x = x - step_size * mhat / (np.sqrt(vhat) + eps)  # functional update, as in sgd and rmsprop
    return x
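

# A usage sketch for adam that also exercises the callback hook shared by
# all three optimizers: it receives (x, i, g) every iteration, which is
# handy for logging. The badly scaled quadratic and the _demo_adam name
# are illustrative assumptions.
def _demo_adam():
    from autograd import grad

    def objective(x):
        return x[0]**2 + 100.0 * x[1]**2  # badly scaled quadratic, minimized at (0, 0)

    trace = []
    log = lambda x, i, g: trace.append(np.linalg.norm(g))  # record gradient norms per iteration

    grad_objective = grad(objective)
    grad_fn = lambda x, i: grad_objective(x)
    x_opt = adam(grad_fn, np.array([1.0, 1.0]), callback=log,
                 num_iters=1000, step_size=0.01)
    # Both coordinates shrink at similar rates despite the 100x curvature
    # gap, thanks to adam's per-parameter step scaling.
    return x_opt, trace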