-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathBackPropogation.py
More file actions
104 lines (80 loc) · 3.61 KB
/
BackPropogation.py
File metadata and controls
104 lines (80 loc) · 3.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import numpy as np
# Sigmoid activation function
def sigmoid(x):
    """Logistic sigmoid activation: map any real input into the open interval (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
# Derivative of Sigmoid activation function
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of the sigmoid's OUTPUT.

    If s = sigmoid(z), then ds/dz = s * (1 - s). Callers pass s (an
    already-activated value), not the pre-activation z.
    """
    complement = 1 - x
    return x * complement
# Artificial Neural Network Class
class ArtificialNeuralNetwork:
    """A fully connected feedforward network with a single hidden layer.

    Architecture: input -> sigmoid(hidden) -> sigmoid(output).
    Training uses full-batch gradient descent via backpropagation; the
    forward pass caches intermediate activations for the backward pass.
    """

    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.1, seed=None):
        """Initialize network dimensions, learning rate, and random parameters.

        Args:
            input_size: Number of features per input sample.
            hidden_size: Number of neurons in the hidden layer.
            output_size: Number of output neurons.
            learning_rate: Gradient-descent step size (default 0.1).
            seed: Optional int; when given, makes the random weight/bias
                initialization reproducible. Defaults to None (global
                NumPy random state), preserving the original behavior.
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate
        # Use a private RandomState when a seed is supplied so runs are
        # reproducible without touching NumPy's global RNG.
        rng = np.random.RandomState(seed) if seed is not None else np.random
        # Weights drawn from a standard normal distribution.
        self.weights_input_hidden = rng.randn(input_size, hidden_size)
        self.weights_hidden_output = rng.randn(hidden_size, output_size)
        # Biases for the hidden and output layers.
        self.bias_hidden = rng.randn(hidden_size)
        self.bias_output = rng.randn(output_size)

    def forward(self, X):
        """Run the feedforward pass and cache activations for backprop.

        Args:
            X: Array of shape (n_samples, input_size).

        Returns:
            Network output of shape (n_samples, output_size), values in (0, 1).
        """
        self.input = X
        # Input -> hidden layer (affine transform, then sigmoid).
        self.hidden_input = np.dot(self.input, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = sigmoid(self.hidden_input)
        # Hidden -> output layer (affine transform, then sigmoid).
        self.output_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.output = sigmoid(self.output_input)
        return self.output

    def backward(self, X, Y):
        """One backpropagation step using the activations cached by forward().

        Args:
            X: Input batch (kept for signature compatibility; forward()
                already cached it as self.input).
            Y: Target batch of shape (n_samples, output_size).
        """
        # Error at the output (target minus prediction).
        error = Y - self.output
        # Output-layer delta: error scaled by sigmoid slope at the output.
        d_output = error * sigmoid_derivative(self.output)
        # Propagate the output delta back through the hidden->output weights.
        error_hidden = d_output.dot(self.weights_hidden_output.T)
        # Hidden-layer delta.
        d_hidden = error_hidden * sigmoid_derivative(self.hidden_output)
        # Gradient-ascent on (Y - output) is gradient-descent on the loss:
        # accumulate over the batch and scale by the learning rate.
        self.weights_hidden_output += self.hidden_output.T.dot(d_output) * self.learning_rate
        self.bias_output += np.sum(d_output, axis=0) * self.learning_rate
        self.weights_input_hidden += self.input.T.dot(d_hidden) * self.learning_rate
        self.bias_hidden += np.sum(d_hidden, axis=0) * self.learning_rate

    def train(self, X, Y, epochs=10000):
        """Train with full-batch gradient descent for the given epoch count.

        Prints the mean absolute error every 1000 epochs for progress
        monitoring.

        Args:
            X: Training inputs, shape (n_samples, input_size).
            Y: Training targets, shape (n_samples, output_size).
            epochs: Number of full passes over the batch (default 10000).
        """
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, Y)
            if epoch % 1000 == 0:
                error = np.mean(np.abs(Y - output))
                print(f'Epoch {epoch} - Error: {error}')

    def predict(self, X):
        """Return the trained network's output for X (a plain forward pass)."""
        return self.forward(X)
# Example usage
# Demo: learn the XOR truth table end to end.
if __name__ == "__main__":
    # The four XOR input pairs and their target outputs.
    xor_inputs = np.array([[0, 0],
                           [0, 1],
                           [1, 0],
                           [1, 1]])
    xor_targets = np.array([[0],
                            [1],
                            [1],
                            [0]])

    # Build a 2 -> 4 -> 1 network and fit it to the truth table.
    network = ArtificialNeuralNetwork(input_size=2, hidden_size=4, output_size=1, learning_rate=0.1)
    network.train(xor_inputs, xor_targets, epochs=10000)

    # Evaluate on the training inputs; round to 0/1 for a crisp readout.
    predictions = network.predict(xor_inputs)
    print("Predictions after training:")
    print(np.round(predictions))