-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
333 lines (259 loc) · 9.63 KB
/
main.py
File metadata and controls
333 lines (259 loc) · 9.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
import cv2
import numpy as np
import os
from unicodedata import name  # NOTE(review): appears unused — candidate for removal
from webbrowser import get  # NOTE(review): appears unused — candidate for removal
import openai
from gtts import gTTS
import os  # NOTE(review): duplicate of the earlier `import os`
# NOTE(review): hard-coded credential placeholders; in production these should
# come from environment variables, never from source control.
api_key = "#your_api_key"
openai.organization = "#organization_k"
openai.api_key = api_key
def get_prompt(trash):
    """Build a few-shot classification prompt for the given piece of trash.

    Returns the fixed example block with *trash* appended after the final
    ``Trash:`` line, ready to send to the completion endpoint.
    """
    few_shot_examples = """
Decide where should trash be put: Household food waste, Recyclable Waste, Hazardous waste, or All other non-recyclable solid waste.
Trash: Paper
Bin: Recyclable Waste
Trash: Mobile Phone
Bin: Hazardous waste
Trash: Electronic Device
Bin: Hazardous waste
Trash: Fruit peels
Bin: Household food waste
Trash: Batteries
Bin: Hazardous waste
Trash: Bubble Wrap
Bin: All other non-recyclable solid waste
Trash: Clothes
Bin: Recyclable Waste
Trash: Any clothing
Bin: Recyclable waste
Trash: Dress
Bin: Recyclable waste
Trash:
"""
    return f"{few_shot_examples}{trash}"
def what_bin(trash):
    """Ask the completion model which bin *trash* belongs in.

    Sends the few-shot prompt from get_prompt() and parses the completion
    text for a ``Bin:`` line. Prints and returns a sentence of the form
    ``"Use <bin> bin."``.
    """
    comp = openai.Completion.create(
        engine="text-davinci-002",
        prompt=get_prompt(trash),
        max_tokens=20,
    )
    resp = comp["choices"][0]["text"]
    marker = "Bin:"
    i = resp.find(marker)
    if i != -1:
        # Take everything after "Bin:"; strip() removes the leading space
        # and any trailing newline the model appends.
        bin_name = resp[i + len(marker):].strip()
    else:
        # Fix: the original sliced resp[find(...) + 5:] even when the marker
        # was absent (find() == -1), producing a garbage answer.
        bin_name = resp.strip()
    output = "Use " + bin_name + " bin."
    print(output)
    return output
def get_advice(trash):
    """Request free-form recycling advice for *trash* from the model.

    Prints the completion text and returns it unchanged.
    """
    completion = openai.Completion.create(
        engine="text-davinci-002",
        prompt="Give detailed advice on recycling " + trash,
        max_tokens=200,
    )
    advice = completion["choices"][0]["text"]
    print(advice)
    return advice
def description(trash):
    """Generate spoken disposal guidance for *trash*.

    Combines the bin recommendation and recycling advice into one sentence,
    then renders it to speech and saves it as ``audio_current.mp3``.
    Playback is left to the caller.
    """
    bin_sentence = what_bin(trash)
    advice = get_advice(trash)
    final_text = "If you want to throw away " + trash + " " + bin_sentence + advice
    speech = gTTS(text=final_text, lang='en', slow=False)
    speech.save("audio_current.mp3")
    return
def text_rec():
    """Prompt the user to type an item name and generate audio advice for it."""
    print()
    print("WHAT DO YOU NEED TO THROW AWAY?")
    item = input()
    description(item)
# Load the pretrained YOLOv3 network once at module level; both video_rec()
# and image_rec() reuse this shared `net` for inference.
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
def video_rec():
    """Capture webcam frames and run YOLOv3 detection until an object is found.

    For each detection surviving non-max suppression, calls description() to
    generate audio guidance, prints the label, and draws the box on the frame.
    The loop ends after the first frame that produced at least one detection.

    Fix: the original re-opened ``cv2.VideoCapture(0)`` on every loop
    iteration and never released it — a camera-handle leak. The capture is
    now opened once and released in a ``finally`` block. Dead bookkeeping
    (``return1``/``count``, whose exit condition could never trigger) removed.
    """
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(100, 3))
    with open('coco.names', 'r') as f:
        classes = f.read().splitlines()
    cap = cv2.VideoCapture(0)  # open the camera once, not per frame
    try:
        searching = True
        while searching:
            _, img = cap.read()
            height, width, _ = img.shape
            # Normalize and resize the frame into YOLOv3's expected input blob.
            blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0, 0, 0),
                                         swapRB=True, crop=False)
            net.setInput(blob)
            layer_outputs = net.forward(net.getUnconnectedOutLayersNames())
            boxes = []
            confidences = []  # model confidence per kept box
            class_ids = []
            for output in layer_outputs:
                for detection in output:
                    # Class scores start at index 5 (after box + objectness).
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.2:
                        # Rescale normalized box back to frame coordinates.
                        center_x = int(detection[0] * width)
                        center_y = int(detection[1] * height)
                        w = int(detection[2] * width)
                        h = int(detection[3] * height)
                        # Top-left corner from the center point.
                        x = int(center_x - w / 2)
                        y = int(center_y - h / 2)
                        boxes.append([x, y, w, h])
                        confidences.append(float(confidence))
                        class_ids.append(class_id)
            # Non-max suppression removes overlapping duplicate boxes.
            indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)
            if len(indexes) > 0:
                for i in indexes.flatten():
                    x, y, w, h = boxes[i]
                    label = str(classes[class_ids[i]])
                    confidence = str(round(confidences[i], 2))
                    color = colors[i]
                    searching = False  # stop after this frame's detections
                    description(label)
                    print("the item is: ", label)
                    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                    cv2.putText(img, label + " " + confidence, (x, y + 20),
                                font, 2, (255, 255, 255), 2)
            cv2.imshow('Image', img)
            cv2.waitKey(1)
    finally:
        cap.release()  # always give the camera back
def image_rec():
    """Run YOLOv3 detection on a user-supplied image file.

    Asks for a file name, detects objects in the image, calls description()
    for each detection surviving non-max suppression, and displays the
    annotated image.

    Fixes over the original: the image is read once (the original re-read
    the same file on every loop pass); a missing/unreadable file no longer
    crashes on ``img.shape`` (``cv2.imread`` returns None); and the function
    no longer loops forever when a static image yields no detections —
    detection on an unchanging image is deterministic, so one pass suffices.
    """
    print("text file name: ")
    user_input = input()
    img = cv2.imread(user_input)
    if img is None:
        # cv2.imread signals failure by returning None rather than raising.
        print("could not read image:", user_input)
        return
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(100, 3))
    with open('coco.names', 'r') as f:
        classes = f.read().splitlines()
    height, width, _ = img.shape
    # Normalize and resize into YOLOv3's expected input blob.
    blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0, 0, 0),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layer_outputs = net.forward(net.getUnconnectedOutLayersNames())
    boxes = []
    confidences = []  # model confidence per kept box
    class_ids = []
    for output in layer_outputs:
        for detection in output:
            # Class scores start at index 5 (after box + objectness).
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.2:
                # Rescale normalized box back to image coordinates.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Top-left corner from the center point.
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # Non-max suppression removes overlapping duplicate boxes.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)
    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))
            color = colors[i]
            description(label)
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, label + " " + confidence, (x, y + 20),
                        font, 2, (255, 255, 255), 2)
    cv2.imshow('Image', img)
    cv2.waitKey(1)
def main():
    """Interactive menu loop.

    Repeats the menu until the user enters 'q'/'Q'. Options: 'a' video
    recognition, 'b' image recognition, 'c' text entry; each plays the
    generated audio file afterwards via mpg123.
    """
    key = 'A'
    while key.lower() != 'q':
        display_menu()
        key = input()
        choice = key.lower()
        if choice == 'a':
            print("pressed")
            video_rec()
            os.system("mpg123 audio_current.mp3")
        if choice == 'b':
            image_rec()
            os.system("mpg123 audio_current.mp3")
        if choice == 'c':
            text_rec()
            os.system("mpg123 audio_current.mp3")
    return
def display_menu():
    """Print the interactive menu of recognition options.

    Fix: corrected the user-facing typo "Choose you option" ->
    "Choose your option".
    """
    print("WELCOME TO __HAMILTON__COOL__CODE__")
    print("AI ASSISTANT WILL HELP YOU THROW AWAY YOUR TRASH")
    print()
    print("A -- video recognition")
    print("B -- image recognition")
    print("C -- from text")
    print("Q -- to quit")
    print("Choose your option: ")
# Run the interactive menu only when executed as a script, not on import.
if __name__ == "__main__":
    main()