factory.py
#!/usr/bin/env python
from math import sqrt


def retSimilarity(dataset, reference, sim):
    ''' Compare a dataset of live readings from the Arduino against each
    reference dataset loaded from the pickle, and return one
    similarity score per reference set.
    sim - type of metric to use:
        1. Taxicab metric.
        2. Euclidean distance.
        3. Chebyshev distance.
        4. Pearson coefficient. '''
    mismatch = []
    xfilter = 0
    yfilter = 0
    # Iterate through the datasets in the reference (serialized data).
    for eachRefSet in range(len(reference)):
        refLength = len(reference[eachRefSet])
        mismatch.append(0)
        firstSum, secondSum = 0, 0
        firstSumSq, secondSumSq = 0, 0
        cumulativeSum = 0
        # Apply the chosen similarity metric to each pair of (x, y) tuples.
        for pair in range(refLength):
            if sim == 1:  # Taxicab metric: http://en.wikipedia.org/wiki/Taxicab_geometry
                mismatch[eachRefSet] += 1 / float(1 + (abs(dataset[pair][0] - reference[eachRefSet][pair][0]) +
                                                       abs(dataset[pair][1] - reference[eachRefSet][pair][1])))
            elif sim == 2:  # Euclidean distance: http://en.wikipedia.org/wiki/Euclidean_distance
                mismatch[eachRefSet] += 1 / float(1 + sqrt(pow(dataset[pair][0] - reference[eachRefSet][pair][0], 2) +
                                                           pow(dataset[pair][1] - reference[eachRefSet][pair][1], 2)))
            elif sim == 3:  # Chebyshev distance: http://en.wikipedia.org/wiki/Chebyshev_distance
                xfilter += abs(dataset[pair][0] - reference[eachRefSet][pair][0])
                yfilter += abs(dataset[pair][1] - reference[eachRefSet][pair][1])
            elif sim == 4:  # Pearson coefficient: http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
                firstSum += dataset[pair][1]
                secondSum += reference[eachRefSet][pair][1]
                firstSumSq += pow(dataset[pair][1], 2)
                secondSumSq += pow(reference[eachRefSet][pair][1], 2)
                cumulativeSum += dataset[pair][1] * reference[eachRefSet][pair][1]
        # Additional calculations for the Chebyshev and Pearson metrics.
        if sim == 3:
            mismatch[eachRefSet] = 1 / float(1 + max(xfilter, yfilter))
            xfilter, yfilter = 0, 0
        if sim == 4:
            # Use true division so the integer sums are not truncated.
            top = cumulativeSum - float(firstSum * secondSum) / refLength
            bottom = sqrt((firstSumSq - float(pow(firstSum, 2)) / refLength) *
                          (secondSumSq - float(pow(secondSum, 2)) / refLength))
            if bottom == 0 or top == 0:
                mismatch[eachRefSet] = 0
            else:
                mismatch[eachRefSet] = float(top) / float(bottom)
    return mismatch
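

# Illustrative sketch (not part of the original module): the sim == 4 branch in
# retSimilarity accumulates running sums and then applies the Pearson
# product-moment correlation formula,
#   r = (sum(xy) - sum(x)*sum(y)/n) /
#       sqrt((sum(x^2) - sum(x)^2/n) * (sum(y^2) - sum(y)^2/n)).
# The helper below (the name _pearson_example is hypothetical) computes the
# same quantity for two plain value lists, to make the formula easier to follow.
def _pearson_example(xs, ys):
    n = len(xs)
    sum_x, sum_y = sum(xs), sum(ys)
    sum_x_sq, sum_y_sq = sum(x * x for x in xs), sum(y * y for y in ys)
    sum_xy = sum(x * y for x, y in zip(xs, ys))
    top = sum_xy - sum_x * sum_y / float(n)
    bottom = sqrt((sum_x_sq - sum_x ** 2 / float(n)) * (sum_y_sq - sum_y ** 2 / float(n)))
    return top / bottom if bottom else 0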


def scale(list_of_lists):
    ''' Take a list of lists of per-reference-set similarity scores and
    return their element-wise mean, i.e. the average score for each
    reference set across all the metrics supplied. '''
    scaledScore = []
    for x in range(len(list_of_lists[0])):
        scaledScore.append(0)
        for y in range(len(list_of_lists)):
            scaledScore[x] += list_of_lists[y][x]
        scaledScore[x] /= float(len(list_of_lists))
    return scaledScore
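

# Illustrative usage (hypothetical values, not part of the original module):
# scale() averages the score lists element-wise, one mean per reference set.
def _scale_example():
    # Two metrics scored against two reference sets; the element-wise means
    # are (1 + 3) / 2 = 2.0 and (3 + 5) / 2 = 4.0.
    assert scale([[1, 3], [3, 5]]) == [2.0, 4.0]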


def classify(scaled_values):
    ''' Take a list of scaled similarities and find out which eye position
    has the values closest in similarity to the list of values currently
    acquired from the serial port. '''
    positions = {1: 'BLINK',
                 2: 'UP',
                 3: 'UP-RIGHT',
                 4: 'RIGHT',
                 5: 'DOWN-RIGHT',
                 6: 'DOWN',
                 7: 'DOWN-LEFT',
                 8: 'LEFT',
                 9: 'UP-LEFT',
                 10: 'STRAIGHT'}
    # Keep the labels in key order so the i-th score maps to the i-th position.
    positionDemarc = [positions[key] for key in sorted(positions)]
    labelled = {}
    # Assign an eye position to each scaled value, in that order.
    for a in range(len(scaled_values)):
        labelled[scaled_values[a]] = positionDemarc[a]
    # Return the eye position corresponding to the highest similarity.
    return labelled[max(scaled_values)]
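

# Illustrative usage (hypothetical scores, not part of the original module):
# classify() pairs the i-th score with the i-th eye position (BLINK, UP, ...)
# and returns the label whose score is highest.
def _classify_example():
    # Ten scores, one per position; index 3 is highest, which maps to 'RIGHT'.
    scores = [0.1, 0.2, 0.3, 0.9, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4]
    assert classify(scores) == 'RIGHT'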


if __name__ == "__main__":
    # Two reference datasets of (x, y) tuples and one "live" dataset to test.
    a = [[(1, 500), (3, 510), (2, 520), (3, 530), (4, 550), (5, 600)],
         [(5, 600), (4, 550), (3, 530), (2, 520), (3, 510), (1, 500)]]
    b = [(100, 624), (531, 652), (7, 11800), (9, 120), (2, 500), (1, 602)]
    c = [retSimilarity(b, a, 1), retSimilarity(b, a, 2), retSimilarity(b, a, 3), retSimilarity(b, a, 4)]
    d = scale([retSimilarity(b, a, 4)])
    print(c)
    print(scale(c))
    print(classify(scale(c)))
    print(classify(d))