-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathneuralnetwork.py
More file actions
115 lines (96 loc) · 3.11 KB
/
neuralnetwork.py
File metadata and controls
115 lines (96 loc) · 3.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import numpy as np
import pandas as pd
import matplotlib
import pickle
from matplotlib import pyplot as plt
def init_parameters():
    """Randomly initialise a 784-10-10 network.

    Every weight and bias is drawn uniformly from [-0.5, 0.5).

    Returns:
        (W1, b1, W2, b2): layer-1 weights (10, 784) and bias (10, 1),
        layer-2 weights (10, 10) and bias (10, 1).
    """
    first_weights = np.random.rand(10, 784) - 0.5
    first_bias = np.random.rand(10, 1) - 0.5
    second_weights = np.random.rand(10, 10) - 0.5
    second_bias = np.random.rand(10, 1) - 0.5
    return first_weights, first_bias, second_weights, second_bias
def load_parameters(filename='model_weights.pkl'):
    """Load network parameters previously written by save_parameters.

    NOTE(review): pickle.load can execute arbitrary code from the file —
    only load weight files from a trusted source.

    Args:
        filename: path to the pickle file containing a dict with keys
            'W1', 'b1', 'W2', 'b2'.

    Returns:
        (W1, b1, W2, b2) as stored in the file.
    """
    with open(filename, 'rb') as weight_file:
        params = pickle.load(weight_file)
    return params['W1'], params['b1'], params['W2'], params['b2']
def ReLU(Z):
    """Element-wise rectified linear unit: max(0, z) for each entry of Z."""
    return np.where(Z > 0, Z, 0)
def softmax(Z):
    """Column-wise softmax (classes on axis 0, samples on axis 1).

    Fix: the original computed np.exp(Z) directly, which overflows to inf
    (and then NaN after division) for logits around 700+. Subtracting the
    per-column maximum before exponentiating is mathematically identical
    but numerically stable. The builtin sum() is also replaced with an
    explicit np.sum over axis 0 for clarity.

    Args:
        Z: array of shape (num_classes, num_samples) of logits.

    Returns:
        Array of the same shape whose columns are probability
        distributions (non-negative, summing to 1).
    """
    shifted = Z - np.max(Z, axis=0, keepdims=True)
    exp_Z = np.exp(shifted)
    return exp_Z / np.sum(exp_Z, axis=0, keepdims=True)
def forward_prop(W1, b1, W2, b2, X):
    """Run one forward pass: linear -> ReLU -> linear -> softmax.

    Args:
        W1, b1: first-layer weights and bias.
        W2, b2: second-layer weights and bias.
        X: input batch of shape (784, num_samples), one sample per column.

    Returns:
        (Z1, A1, Z2, A2): pre-activations and activations of both layers;
        A2 holds the per-column class probabilities.
    """
    hidden_pre = W1 @ X + b1
    hidden_act = ReLU(hidden_pre)
    output_pre = W2 @ hidden_act + b2
    output_act = softmax(output_pre)
    return hidden_pre, hidden_act, output_pre, output_act
def one_hot(Y, num_classes=None):
    """One-hot encode integer labels, one column per sample.

    Generalization: the original always sized the encoding as Y.max() + 1,
    which produces a wrong-shaped matrix whenever a batch happens to lack
    the highest class (e.g. no 9s in an MNIST batch). The new optional
    num_classes parameter lets callers pin the class count (pass 10 for
    MNIST); the default of None preserves the original behaviour exactly.

    Args:
        Y: 1-D array of integer labels.
        num_classes: total number of classes; defaults to Y.max() + 1.

    Returns:
        Array of shape (num_classes, Y.size) with a single 1 per column.
    """
    if num_classes is None:
        num_classes = int(Y.max()) + 1
    encoded = np.zeros((Y.size, num_classes))
    encoded[np.arange(Y.size), Y] = 1
    # Transpose so columns line up with the sample axis used by A2.
    return encoded.T
def deriv_ReLU(Z):
    """Derivative of ReLU: a boolean mask that is True where Z > 0.

    The mask promotes to 1.0/0.0 when multiplied with float arrays in
    back-propagation (True = 1, False = 0).
    """
    return np.greater(Z, 0)
def back_prop(Z1, A1, Z2, A2, W1, W2, X, Y):
    """Backpropagate through the two-layer network and return gradients.

    Fix: the original read the batch size from a global variable `m` that
    is only defined inside the __main__ block, so calling back_prop from
    any other entry point raised NameError. The batch size is now derived
    locally from Y.size (identical value for this data layout).

    Args:
        Z1, A1, Z2, A2: forward-pass pre-activations and activations.
        W1, W2: current layer weights (W1 is accepted for interface
            compatibility; only W2 is needed for the chain rule here).
        X: input batch, shape (784, num_samples).
        Y: 1-D integer labels for the batch.

    Returns:
        (dW1, db1, dW2, db2): averaged parameter gradients.
    """
    m = Y.size  # number of samples in this batch
    one_hot_Y = one_hot(Y)
    # Combined softmax + cross-entropy gradient at the output layer.
    dZ2 = A2 - one_hot_Y
    dW2 = dZ2.dot(A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    # Chain rule back through layer 2 and the ReLU nonlinearity.
    dZ1 = W2.T.dot(dZ2) * deriv_ReLU(Z1)
    dW1 = dZ1.dot(X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    return dW1, db1, dW2, db2
def update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
    """One gradient-descent step: parameter minus alpha-scaled gradient.

    Returns new arrays; the inputs are not mutated.
    """
    new_W1 = W1 - alpha * dW1
    new_b1 = b1 - alpha * db1
    new_W2 = W2 - alpha * dW2
    new_b2 = b2 - alpha * db2
    return new_W1, new_b1, new_W2, new_b2
def get_predictions(A2):
    """Predicted class per sample: the row index of each column's maximum."""
    return np.argmax(A2, axis=0)
def get_accuracy(predictions, Y):
    """Return the fraction of predictions that match the labels Y.

    Fix: removed the leftover debug print that dumped the entire
    predictions and label arrays (tens of thousands of values) to stdout
    on every call — gradient_descent invokes this every 50 iterations.

    Args:
        predictions: 1-D integer array of predicted classes.
        Y: 1-D integer array of true labels, same length.

    Returns:
        Accuracy in [0.0, 1.0].
    """
    return np.sum(predictions == Y) / Y.size
def make_prediction(X, W1, b1, W2, b2):
    """Classify samples X with the given parameters.

    Args:
        X: input batch of shape (784, num_samples).
        W1, b1, W2, b2: trained network parameters.

    Returns:
        (predictions, probabilities): predicted class per sample, plus the
        full softmax output A2 (one probability column per sample).
    """
    _, _, _, probabilities = forward_prop(W1, b1, W2, b2, X)
    labels = get_predictions(probabilities)
    return labels, probabilities
def save_parameters(W1, b1, W2, b2, filename='model_weights.pkl'):
    """Save the trained parameters to a pickle file.

    Fix: the confirmation message now reports the actual destination —
    the original f-string contained no placeholder and always printed
    "(unknown)" instead of the filename.

    Args:
        W1, b1, W2, b2: trained network parameters.
        filename: destination path for the pickle file.
    """
    parameters = {
        'W1': W1,
        'b1': b1,
        'W2': W2,
        'b2': b2
    }
    with open(filename, 'wb') as f:
        pickle.dump(parameters, f)
    print(f"Parameters saved to {filename}")
def gradient_descent(X, Y, iterations, alpha):
    """Train the network with full-batch gradient descent.

    Progress (iteration number and training accuracy) is printed every
    50 steps.

    Args:
        X: training inputs, shape (784, num_samples), scaled to [0, 1].
        Y: 1-D integer labels.
        iterations: number of full-batch update steps.
        alpha: learning rate.

    Returns:
        (W1, b1, W2, b2): the trained parameters.
    """
    W1, b1, W2, b2 = init_parameters()
    for step in range(1, iterations + 1):
        Z1, A1, Z2, A2 = forward_prop(W1, b1, W2, b2, X)
        gradients = back_prop(Z1, A1, Z2, A2, W1, W2, X, Y)
        W1, b1, W2, b2 = update_params(W1, b1, W2, b2, *gradients, alpha)
        if step % 50 == 0:
            print("Iteration: ", step)
            print("Accuracy: ", get_accuracy(get_predictions(A2), Y))
    return W1, b1, W2, b2
if __name__ == "__main__":
    # Load the MNIST training CSV: column 0 is the label, the remaining
    # 784 columns are pixel intensities in [0, 255].
    data = pd.read_csv('./dataset/train.csv')
    data.head()  # NOTE(review): return value unused — leftover from a notebook cell
    data = np.array(data)
    # m = number of samples, n = 785 (label + 784 pixels).
    # NOTE(review): m is a module-level global that back_prop reads —
    # the training functions only work when run through this script.
    m, n = data.shape
    np.random.shuffle(data)
    # Hold out the first 1000 shuffled rows as a dev set; transpose so
    # each column is one sample (features on axis 0).
    data_dev = data[0:1000].T
    Y_dev = data_dev[0]
    X_dev = data_dev[1:n]
    X_dev = X_dev / 255.0
    # Remaining rows form the training set, same column-per-sample layout.
    data_train = data[1000:m].T
    Y_train = data_train[0]
    X_train = data_train[1:n]
    X_train = X_train / 255.0
    # Train for 500 full-batch iterations at learning rate 0.1, then
    # persist the weights for later use by load_parameters.
    W1, b1, W2, b2 = gradient_descent(X_train, Y_train, 500, 0.1)
    save_parameters(W1, b1, W2, b2)