-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathNode.cpp
More file actions
103 lines (82 loc) · 3.31 KB
/
Node.cpp
File metadata and controls
103 lines (82 loc) · 3.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/********************************************************************
* Kodi Neumiller *
* kneumiller *
* CSCI 446 *
* Assignment 5: Machine Learning *
* *
* An A.I. based off of a neural network pattern. *
* The program will take in a file with each row having some *
* amount of input and one output. *
* *
* To compile: g++ -o MachineLearning MachineLearning.cpp *
* To run: ./MachineLearning *
********************************************************************/
#include <cstdlib>
#include <cmath>
#include "Node.h"
//Shared training hyperparameters for every node in the network.
//Want to have a higher learning rate because there is less data available to train on
// The higher the rate, the faster (sometimes more reckless) it learns
double Node::learningRate = 0.5; //Learning rate between [0.0 - 1.0]
double Node::alpha = 0.5; //Momentum between [0.0 - n]: fraction of the previous weight delta carried forward
/**
 * Construct a node with one randomly initialized weight per outgoing
 * connection.
 *
 * @param numOutputs number of nodes in the next layer (one weight each)
 * @param myIndex    this node's index within its own layer; used by the
 *                   next layer to select the weight feeding each node
 */
Node::Node(int numOutputs, int myIndex) {
    //Reserve up front so the loop below performs at most one allocation.
    //Guarded: a negative count converted to size_t would be enormous.
    if (numOutputs > 0) {
        outputWeight.reserve(numOutputs);
    }
    for (int i = 0; i < numOutputs; i++) {
        outputWeight.push_back(randomWeight());
    }
    nodeIndex = myIndex;
}
//Produce a pseudo-random initial weight in the range [0.0, 1.0].
double Node::randomWeight() {
    return static_cast<double>(rand()) / RAND_MAX;
}
//Compute this node's activation from the previous layer's outputs:
//each source node contributes its output scaled by the weight of its
//connection into this node (selected by nodeIndex), and the sum is
//squashed through the transfer function.
//NOTE(review): the layer arrives by value, copying every Node on each
// call — confirm whether Node.h could declare a reference instead.
void Node::nodeFeedForward(vector<Node> previousLayer) {
    double weightedSum = 0.0;
    for (size_t i = 0; i < previousLayer.size(); ++i) {
        weightedSum += previousLayer[i].getOutputVal()
                     * previousLayer[i].outputWeight[nodeIndex];
    }
    outputVal = transferFunction(weightedSum);
}
//Activation (transfer) function: the hyperbolic tangent, which maps
//the weighted input sum into the range [-1.0, 1.0].
double Node::transferFunction(double x) {
    return std::tanh(x);
}
double Node::transferFunctionDerivative(double x) {
    //Derivative of the activation: d/dz tanh(z) = 1 - tanh^2(z).
    //Callers pass the node's already-activated output (x = tanh(z)),
    // so the derivative reduces to 1 - x * x.
    return 1.0 - x * x;
}
void Node::setOutputVal(double value) {
outputVal = value;
}
double Node::getOutputVal() {
return outputVal;
}
//Gradient for an output-layer node: the error (target value minus the
//actual output) scaled by the activation slope at the current output.
void Node::outputGradient(double value) {
    double delta = value - outputVal;
    gradient = delta * transferFunctionDerivative(outputVal);
}
/**
 * Gradient for a hidden-layer node.
 *
 * Sums this node's contribution to the error of each node it feeds in
 * the next layer, then scales by the activation slope at this output.
 *
 * @param nextLayer the layer this node feeds into
 */
void Node::hiddenGradient(vector<Node> nextLayer) {
    //Sum of (connection weight * downstream gradient)
    double sumOfDerivatives = 0;
    //The last node of the layer is skipped — presumably a bias node with
    // no incoming error to propagate; confirm against the network setup.
    //Bound written as i + 1 < size() rather than i < size() - 1:
    // size() is unsigned, so size() - 1 underflows to a huge value when
    // the layer is empty, reading far out of bounds.
    for (size_t i = 0; i + 1 < nextLayer.size(); i++) {
        sumOfDerivatives += outputWeight[i] * nextLayer[i].gradient;
    }
    gradient = sumOfDerivatives * transferFunctionDerivative(outputVal);
}
/**
 * Update the weights feeding into this node from the previous layer.
 *
 * New delta = (learning rate * source output * this node's gradient)
 *             + (momentum * previous delta for that connection).
 *
 * NOTE(review): previousLayer is received BY VALUE, so every update
 * below lands on a local copy that is discarded on return — the
 * caller's layer is never modified and the network cannot learn. The
 * parameter should be vector<Node>& (here and in Node.h); it is left
 * by value so this definition still matches the declaration.
 */
void Node::updateInputWeights(vector<Node> previousLayer) {
    for (size_t i = 0; i < previousLayer.size(); i++) {
        //Bind a reference, not a copy: the original code copied the Node
        // and updated the copy, so even the local layer stayed untouched.
        Node& otherNode = previousLayer[i];
        //Previous delta for the connection from otherNode into this node
        double oldWeightDifference = otherNode.changeInWeight[nodeIndex];
        //Gradient-descent step plus a momentum term (a fraction of the
        // previous delta) to smooth the update trajectory
        double newWeightDifference =
            (learningRate * otherNode.getOutputVal() * gradient) + (alpha * oldWeightDifference);
        otherNode.changeInWeight[nodeIndex] = newWeightDifference;
        otherNode.outputWeight[nodeIndex] += newWeightDifference;
    }
}