multi_class_tf.py
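"""Multi-class classification of wine data with a small feed-forward network
in TensorFlow 1.x: one sigmoid hidden layer, a squared-error loss, and the
Adam optimizer. Expects 'train_wine.csv' and 'test_wine.csv' with the class
label (1-3) in column 0 and the feature columns after it."""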
import tensorflow as tf
import numpy as np
import pandas as pd
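
# Note: this script uses the TensorFlow 1.x graph API (placeholders, sessions).
# To run it under TensorFlow 2.x, the import above would need to become, e.g.:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()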

# Function importing the dataset
def importdata():
    train_data = pd.read_csv('train_wine.csv', sep=',', header=None)
    print(train_data.shape)
    test_data = pd.read_csv('test_wine.csv', sep=',', header=None)
    print(test_data.shape)
    return train_data.values, test_data.values

# Function to split the dataset into features and labels
def splitdataset(data):
    # Separating the target variable (column 0) from the features
    x = data[:, 1:]
    y = data[:, 0]
    return x, y
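
# Expected row layout (assuming the usual wine CSV with the label first):
#   column 0     -> class label in {1, 2, 3}
#   columns 1..n -> numeric features
# e.g. splitdataset(np.array([[1, 14.23, 1.71]])) gives
#   x = [[14.23, 1.71]] and y = [1]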

def feature_normalization(x):
    # Per-feature mean and standard deviation of the training data
    mu = np.mean(x, axis=0)
    sigma = np.std(x, axis=0)
    return mu, sigma


def normalization(x, mu, sigma):
    # Standardize features to zero mean and unit variance
    return np.divide(np.subtract(x, mu), sigma)
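
# Together these implement z-score standardization, x' = (x - mu) / sigma,
# with mu and sigma computed on the training set only so that no test-set
# statistics leak into training.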

def dense_to_one_hot(labels_dense, num_classes=3):
    # Labels are 1-based (1..num_classes); shift to 0-based before encoding
    labels_dense = np.subtract(labels_dense, 1)
    labels_one_hot = tf.one_hot(labels_dense, depth=num_classes)
    return labels_one_hot.eval()  # requires an active default session
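
# Worked example (inside an active session): label 2 -> index 1 -> [0., 1., 0.],
# so dense_to_one_hot(np.array([1, 3])) evaluates to
#   [[1., 0., 0.],
#    [0., 0., 1.]]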

def evaluate_model(X_train, X_test, y_train, y_test, epochs, batch_size):
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Initialize the variables (i.e. assign their default values)
        sess.run(init)
        for epoch in range(epochs):
            avg_cost = 0.0
            total_batch = int(len(X_train) / batch_size)
            x_batches = np.array_split(X_train, total_batch)
            y_batches = np.array_split(y_train, total_batch)
            for i in range(total_batch):
                batch_x = x_batches[i]
                batch_y = dense_to_one_hot(y_batches[i])
                _, c = sess.run([optimizer, loss],
                                feed_dict={input_layer: batch_x, real_output: batch_y})
                avg_cost += c / total_batch
            if epoch % 100 == 0:
                print("Epoch:", '%04d' % (epoch + 1), "loss=", "{:.9f}".format(avg_cost))
        print("\nTraining complete!")

        # Prediction on the test set
        predict = tf.argmax(output_layer, 1)
        pred = predict.eval({input_layer: X_test.reshape(-1, num_input)})
        print(pred)
        # Shift the 0-based argmax indices back to the 1-based class labels
        predicted_labels = np.add(pred, 1)
        print("predictions", predicted_labels)
        pred_temp = tf.equal(tf.argmax(output_layer, 1), tf.argmax(real_output, 1))
        accuracy = tf.reduce_mean(tf.cast(pred_temp, "float"))
        print("Test Accuracy:", accuracy.eval({input_layer: X_test.reshape(-1, num_input),
                                               real_output: dense_to_one_hot(y_test)}))

if __name__ == '__main__':
    # Fix the seed so the random weight initialization is reproducible
    seed = 128

    # Get the dataset
    trainset, testset = importdata()

    # Split into features and labels
    X_train, y_train = splitdataset(trainset)
    X_test, y_test = splitdataset(testset)

    # Feature normalization (statistics from the training set only)
    mu, sigma = feature_normalization(X_train)
    X_train = normalization(X_train, mu, sigma)
    X_test = normalization(X_test, mu, sigma)
    # Network parameters
    num_input = X_train.shape[1]   # number of feature columns
    num_hidden = 5
    num_output = 3

    # Define placeholders for the features and the one-hot labels
    input_layer = tf.placeholder(tf.float32, [None, num_input])
    real_output = tf.placeholder(tf.float32, [None, num_output])

    # Training parameters
    learning_rate = 0.01
    epochs = 1000
    batch_size = 50
    # Define weights and biases of the neural network
    hidden_layer_weights = tf.Variable(tf.random_normal([num_input, num_hidden], seed=seed))
    hidden_layer_biases = tf.Variable(tf.random_normal([num_hidden], seed=seed))
    # hidden_layer_biases = tf.Variable(tf.zeros([num_hidden]))
    output_layer_weights = tf.Variable(tf.random_normal([num_hidden, num_output], seed=seed))
    output_layer_biases = tf.Variable(tf.random_normal([num_output], seed=seed))
    # output_layer_biases = tf.Variable(tf.zeros([num_output]))

    # Build the network's computational graph: one sigmoid hidden layer, linear output
    hidden_layer = tf.nn.sigmoid(tf.add(tf.matmul(input_layer, hidden_layer_weights), hidden_layer_biases))
    # hidden_layer = tf.nn.relu(hidden_layer)
    output_layer = tf.matmul(hidden_layer, output_layer_weights) + output_layer_biases

    # Squared-error loss between the one-hot targets and the network outputs
    loss = tf.reduce_mean(tf.square(real_output - output_layer))
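
    # Note: squared error on the raw outputs does train, but the conventional
    # loss for multi-class classification would be softmax cross-entropy, e.g.
    # (a sketch, not what this script uses):
    #   loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    #       labels=real_output, logits=output_layer))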

    # Backpropagation via Adam, a variant of gradient descent
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

    # Training and evaluation
    evaluate_model(X_train, X_test, y_train, y_test, epochs, batch_size)