"""
Huu Le
classifier.py contains methods for utilizing various
learning algorithms. Refer to each function for more
information.
"""
import numpy as np
import copy
def gaussNB(trainingData, trainingLabels):
    """
    Trains a Gaussian Naive Bayes classifier on the given
    training data/labels and returns the fitted classifier.
    """
    # Initialize the Gaussian Naive Bayes model and fit it to the training set
    from sklearn.naive_bayes import GaussianNB
    clf = GaussianNB()
    clf.fit(trainingData, trainingLabels)
    print("Gaussian Naive Bayes classifier has been generated with a training set size of {}.".format(len(trainingLabels)))
    return clf
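
# A minimal usage sketch for gaussNB, using hypothetical toy data with two
# features per sample (not part of the original file):
#     X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [6.0, 9.0]])
#     y = np.array([0, 0, 1, 1])
#     clf = gaussNB(X, y)
#     clf.predict(np.array([[1.2, 1.9]]))    # -> array([0])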
def multNB(trainingData, trainingLabels):
    """
    Uses Multinomial Naive Bayes, which takes in training data in
    the form of count-style feature vectors, and returns a fitted
    classifier. Note: trainingLabels and trainingData must be the
    same length.
    """
    # Check to ensure the labels and data are the same size
    if not (len(trainingLabels) == len(trainingData)):
        print("Error: Labels and Data are of different sizes: cannot train.")
        return
    # Import from scikit-learn and fit the data into the algorithm
    from sklearn.naive_bayes import MultinomialNB
    clf = MultinomialNB()
    clf.fit(trainingData, trainingLabels)
    print("Multinomial Naive Bayes classifier has been generated with a training set size of {}.".format(len(trainingLabels)))
    return clf
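
# Note: MultinomialNB expects non-negative feature values (e.g. word counts or
# term frequencies). A small sketch with hypothetical count vectors:
#     counts = np.array([[2, 0, 1], [0, 3, 1], [4, 0, 0]])
#     labels = np.array([0, 1, 0])
#     clf = multNB(counts, labels)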
def svmClassifier(trainingData, trainingLabels):
    """
    Uses a Support Vector Machine (SVM) as the classifier. The
    'kernel' variable below determines which kernel function is
    used; we currently use 'rbf', scikit-learn's default. Training
    scales poorly when there are very many training samples.
    """
    # Check to ensure the labels and data are the same size
    if not (len(trainingLabels) == len(trainingData)):
        print("Error: Labels and Data are of different sizes: cannot train.")
        return
    # Import SVC from scikit-learn and fit the data into the classifier
    from sklearn.svm import SVC
    k = 'rbf'
    clf = SVC(kernel=k)
    clf.fit(trainingData, trainingLabels)
    print("Support Vector Classifier with kernel:", k, "has been generated with a training set size of", len(trainingLabels))
    return clf
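
# SVC also accepts 'linear', 'poly', and 'sigmoid' kernels. A hedged sketch of
# a variant that exposes the kernel as a parameter (svmClassifierWithKernel is
# hypothetical and not part of the original interface):
#     from sklearn.svm import SVC
#     def svmClassifierWithKernel(trainingData, trainingLabels, kernel='rbf'):
#         clf = SVC(kernel=kernel)
#         clf.fit(trainingData, trainingLabels)
#         return clf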
def perceptron(trainingData, trainingLabels):
    """
    Implements a linear perceptron model as the
    machine learning algorithm.
    """
    from sklearn.linear_model import Perceptron
    clf = Perceptron()
    clf.fit(trainingData, trainingLabels)
    print("Perceptron has been generated with a training set size of", len(trainingLabels))
    return clf
def sgdClassifier(trainingData, trainingLabels):
    """
    Implements a linear classifier trained with Stochastic Gradient
    Descent (SGD). Works best when there is a lot of training data
    and a large number of features.
    """
    from sklearn.linear_model import SGDClassifier
    clf = SGDClassifier(loss="hinge", penalty="l2")
    clf.fit(trainingData, trainingLabels)
    print("Stochastic Gradient Descent classifier generated with a training set size of", len(trainingLabels))
    return clf
def chooseClassifier(switch, trainingData, trainingLabels):
    """
    chooseClassifier() is the main method that dispatches to the
    different machine learning algorithms based on the 'switch'
    value passed in. Returns a classifier, or None if the switch
    value is not recognized.
    """
    if switch == 1:
        # Switch 1: Gaussian Naive Bayes classifier
        return gaussNB(trainingData, trainingLabels)
    if switch == 2:
        # Switch 2: Multinomial Naive Bayes classifier
        return multNB(trainingData, trainingLabels)
    if switch == 3:
        # Switch 3: SVM classifier
        return svmClassifier(trainingData, trainingLabels)
    if switch == 4:
        # Switch 4: Perceptron
        return perceptron(trainingData, trainingLabels)
    if switch == 5:
        # Switch 5: Stochastic Gradient Descent
        return sgdClassifier(trainingData, trainingLabels)
    print("Error: unknown switch value", switch, "- no classifier generated.")
def predict(clf, newData):
    """
    Takes in a classifier and a 2-D array of samples and returns
    the predicted labels.
    """
    return clf.predict(newData)
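
# scikit-learn's predict() expects a 2-D array of shape (n_samples, n_features).
# To classify a single sample, reshape it first; a small sketch with a
# hypothetical feature vector:
#     sample = np.array([1.2, 1.9])
#     predict(clf, sample.reshape(1, -1))    # returns an array with one label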
# Less efficient version of the accuracy check (see testAccuracy2 below)
def testAccuracy(clf, validationData, validationLabels):
    """
    Given a trained classifier and validation data, run the
    classifier on these sample values and record the accuracy.
    Prints out the final accuracy at the end and returns it.
    """
    size = len(validationLabels)
    correct = 0.0
    for x in range(size):
        tru = validationLabels[x]
        # predict() expects a 2-D array, so reshape the single sample
        est = clf.predict(validationData[x].reshape(1, -1))[0]
        if int(est) == int(tru):
            correct += 1
        # else:
        #     print("Estimation failed:", est, "when correct label is", tru)
        #     print("Classifier's guesses:", clf.predict_proba(validationData[x].reshape(1, -1)))
    accuracy = correct / size
    print("Number correct:", correct, "out of", size, "; Accuracy:", accuracy * 100, "%")
    return accuracy
def testAccuracy2(clf, validationData, validationLabels):
    """
    Given a trained classifier and validation data, score the
    classifier and print its accuracy. Returns the accuracy as a
    decimal at the end for comparison.
    """
    accuracy = clf.score(validationData, validationLabels)
    print("Scoring classifier... Accuracy:", accuracy * 100, "%")
    return accuracy
def extractFile(filename):
    """
    Takes in a filename holding the training data and returns a
    tuple (trainingData, trainingLabels). Assumes that the first
    value of each row is the label and the rest are features.
    """
    dataset = np.loadtxt(filename, delimiter=",")
    # First column holds the labels, remaining columns hold the features
    labels = dataset[:, 0]
    data = dataset[:, 1:]
    return (data, labels)
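
# Expected CSV layout (label first, then comma-separated feature values), e.g.:
#     3,0.25,0.80,1.00
#     7,0.00,0.45,0.10
# extractFile("train.csv") would then return an (n_samples, n_features) data
# array and a length-n_samples label array.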
# Testing code: train each classifier on train.csv and score it on test.csv
if __name__ == "__main__":
    (data, labels) = extractFile("train.csv")
    (valData, valLabels) = extractFile("test.csv")
    for switch in range(1, 6):
        clf = chooseClassifier(switch, data, labels)
        testAccuracy(clf, valData, valLabels)