-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgnb.py
More file actions
154 lines (114 loc) · 4.37 KB
/
gnb.py
File metadata and controls
154 lines (114 loc) · 4.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
# -*- coding: utf-8 -*-
"""GNB_LBW_final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1vCh3BvecQrvmHpi3HDUkOwqohG0g8JE1
Gaussian Naive Bayes implementation on Low Birth Weight Data Set
"""
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
import pandas as pd
"""Take input from the dataset"""
lbw=pd.read_csv('./data/Final.csv')
print(lbw)
"""Drop columns like history which have zero variance"""
X = lbw.drop("reslt", axis=1)
X=X.drop("history",axis=1) #this gives variance zero, so better drop this
y = lbw["reslt"]
"""Normalize Data before its fed into the model"""
import pandas as pd
from sklearn import preprocessing
#normalize
x =X
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
X= pd.DataFrame(x_scaled)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.10, random_state=10)
# Good seed values - 10
print("X_train",X_train)
print("y_train",y_train)
"""Calculated Prior Probabilities for the classes"""
def get_count_unique_vals(labels):
    """Return {label: occurrence count} for a pandas Series of labels."""
    frequencies = {}
    for label, occurrences in labels.value_counts().items():
        frequencies[label] = occurrences
    return frequencies
def prior_prob(labels):
    """Prior probability of each class in *labels*.

    Returns a *set* of (class, prior) tuples -- note the set literal, not
    a dict -- because downstream code unpacks it via ``for k, v in priors``.
    """
    n = labels.count()
    class_counts = get_count_unique_vals(labels)
    return {(cls, cnt / n) for cls, cnt in class_counts.items()}
# Module-level priors: a set of (class, prior) tuples read directly by predict().
priors=prior_prob(y_train)
print("priors",priors)
"""Calculate Mean and Variance For all Features"""
import math
def calculate_mean(df):
    """Arithmetic mean of a Series (column-wise for a DataFrame), via pandas."""
    mean_value = df.mean()
    return mean_value
def calculate_std_dev(df):
    """Sample standard deviation (pandas default, ddof=1)."""
    std_value = df.std()
    return std_value
def Calculate_Mean_and_Variance(X_train, y_train):
    """Fit per-class Gaussian parameters for every feature.

    Returns {class: {feature: [mean, variance]}}, where variance is the
    square of the sample standard deviation of the feature over the
    training rows belonging to that class.
    """
    params_by_class = {}
    feature_names = list(X_train.columns.values)
    for label in y_train.unique():
        # Boolean-mask the training rows down to one class.
        class_rows = X_train[(y_train == label)]
        feature_params = {}
        for feature in feature_names:
            column = class_rows[feature]
            mu = calculate_mean(column)
            sigma = calculate_std_dev(column)
            feature_params[feature] = [mu, math.pow(sigma, 2)]
        params_by_class[label] = feature_params
    return params_by_class
"""For every class and attribute, we keep track of mean and variance"""
dictionary=Calculate_Mean_and_Variance(X_train,y_train)
print("variance and mean for every class and attribute",(dictionary))
import operator
"""NOW using PDF Equation
Given a feature
"""
def calculate_probability(x, mean, variance):
    """Gaussian density N(mean, variance) evaluated at x.

    Raises ZeroDivisionError when variance == 0; count_for_thousand()
    relies on that to discard degenerate train/test splits.
    """
    exponent = -((x - mean) ** 2) / (2 * variance)
    coefficient = 1 / math.sqrt(2 * math.pi * variance)
    return coefficient * math.exp(exponent)
def predict(X_test, mean_variance):
    """Classify every row of X_test with Gaussian Naive Bayes.

    Reads the module-level ``priors`` (a set of (class, prior) tuples) and
    the per-class ``mean_variance`` table {class: {feature: [mean, var]}}.
    Scores are summed in log-space to avoid underflow; a feature whose
    density evaluates to 0 is skipped (it contributes nothing rather than
    -inf).  Returns {row_index: predicted_class}.
    """
    feature_names = list(X_test.columns.values)
    predicted = {}
    for idx, row in X_test.iterrows():
        scores = {}
        for cls, prior in priors:
            log_likelihood = 0
            for name in feature_names:
                stats = mean_variance[cls][name]
                density = calculate_probability(row[name], stats[0], stats[1])
                if density > 0:
                    log_likelihood += math.log(density)
            scores[cls] = math.log(prior) + log_likelihood
        # Assign the class with the highest posterior score.
        predicted[idx] = max(scores, key=scores.get)
    return predicted
# Classify the held-out rows: {row_index: predicted_class}.
predictions=predict(X_test,dictionary)
print("predictions for data in test set",predictions)
def acc(y_test,prediction):
    """Percentage of test labels matched by the predictions.

    y_test     : pandas Series of true labels (its index keys `prediction`)
    prediction : dict mapping row index -> predicted label
    Returns accuracy as a percentage in [0, 100].
    """
    count=0
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent and yields the same (index, value) pairs.
    for ind,row in y_test.items():
        if row == prediction[ind]:
            count+=1
    return count/len(y_test)*100.0
# Accuracy for the single seeded split above.
accuracy=acc(y_test,predictions)
print("accuracy (with a particular seed value) :",accuracy)
def count_for_thousand(X,y):
    """Average GNB accuracy over 1000 random 85/15 train/test shuffles.

    Splits where some class/feature variance collapses to zero make the
    Gaussian pdf raise ZeroDivisionError; those runs are skipped and
    excluded from the average.  Returns the mean accuracy percentage.
    """
    # BUG FIX: predict() reads the module-level ``priors``.  Without this
    # declaration, the assignment below would create a function-local
    # variable and every shuffle would silently be scored with the priors
    # of the very first train/test split.
    global priors
    accuracy=0
    count=0
    for i in range(1000):
        try:
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15)
            priors=prior_prob(y_train)
            dictionary=Calculate_Mean_and_Variance(X_train,y_train)
            predictions=predict(X_test,dictionary)
            accuracy+=acc(y_test,predictions)
            count+=1
        except ZeroDivisionError:
            # Degenerate split (zero variance somewhere) -- discard it.
            pass
    return accuracy/count
print("avg accuracy for thousand train test shuffles:",count_for_thousand(X,y))
#done