mlp_numpy.py
# pylint: skip-file
import mxnet as mx
import logging
# define the mlp: 784-d MNIST input -> 128 -> 64 -> 10 classes with softmax output
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data=act1, name='fc2', num_hidden=64)
act2 = mx.symbol.Activation(data=fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data=act2, name='fc3', num_hidden=10)
mlp = mx.symbol.Softmax(data=fc3, name='mlp')
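# a quick sanity check on the symbol graph (a sketch using the standard
# Symbol API): list_arguments() lists every input the network expects,
# i.e. the data, any label, and the weights and biases to be learned
print(mlp.list_arguments())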
# load the MNIST data set with scikit-learn
from sklearn.datasets import fetch_mldata
from sklearn.utils import shuffle
mnist = fetch_mldata('MNIST original', data_home="./data")
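# note: fetch_mldata was removed from newer scikit-learn releases; a rough
# replacement there is fetch_openml (a sketch, untested against this script;
# its labels come back as strings and need a cast to float):
# from sklearn.datasets import fetch_openml
# mnist = fetch_openml('mnist_784', as_frame=False, data_home="./data")
# mnist.target = mnist.target.astype('float64')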
# shuffle data
X, y = shuffle(mnist.data, mnist.target)
# split dataset
train_data = X[:50000, :].astype('float32')
train_label = y[:50000]
val_data = X[50000: 60000, :].astype('float32')
val_label = y[50000:60000]
# Normalize data
train_data[:] /= 256.0
val_data[:] /= 256.0
batch_size = 100
# alternatively, wrap the arrays in NDArrayIter objects, which makes feeding the model easier
train_iter = mx.io.NDArrayIter(data=train_data, label=train_label, batch_size=batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(data=val_data, label=val_label, batch_size=batch_size)
logging.basicConfig(level=logging.DEBUG)
model = mx.model.FeedForward(
    ctx=mx.cpu(), symbol=mlp, num_round=20,
    learning_rate=0.1, momentum=0.9, wd=0.00001)
# train by passing NumPy ndarrays directly
model.fit(X=train_data, y=train_label)
# or train by passing the data iterators instead
# model.fit(X=train_iter, eval_data=val_iter)
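# a minimal sketch of checking validation accuracy after training, assuming
# FeedForward.predict(X) returns the softmax outputs as a NumPy array with
# one row of class probabilities per example
import numpy as np
probs = model.predict(val_data)          # shape (10000, 10)
pred_label = np.argmax(probs, axis=1)    # predicted digit per image
print('validation accuracy: %.4f' % np.mean(pred_label == val_label))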