-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstream.py
More file actions
133 lines (99 loc) · 3.5 KB
/
stream.py
File metadata and controls
133 lines (99 loc) · 3.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import streamlit as st
import pandas as pd
import numpy as np
import sys
import tensorflow as tf
from keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input, decode_predictions
import numpy as np
import cv2
# from keras.models import load_model
# Load a pretrained InceptionV3 (default = ImageNet weights) at module import.
# NOTE(review): Streamlit re-runs the whole script on every user interaction,
# so this model is reloaded each run — consider st.cache_resource; verify.
model = InceptionV3()
def predict(img, model):
    """Classify a single PIL image with *model* and render the top-1
    ImageNet label plus confidence into the Streamlit page.

    Args:
        img: PIL image already sized for the network (299x299 for InceptionV3).
        model: a Keras classification model whose output decode_predictions
            understands.
    """
    # Build a 1-image batch and apply the model's official preprocessing.
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)
    # decode_predictions yields one list of (class_id, label, prob) per sample.
    decoded = decode_predictions(model.predict(batch), top=1)
    for _, (_, name, score) in enumerate(decoded[0]):
        st.markdown("{}: {:.2f}%".format(name, score * 100))
def secpred(frame, model):
    """Classify one video frame with InceptionV3 and return the top-1 label.

    Args:
        frame: a BGR image array as produced by cv2.VideoCapture.read().
        model: the InceptionV3 Keras model.

    Returns:
        The top-1 ImageNet class label (str), or '' if decoding yields
        no predictions.
    """
    # Fix: OpenCV frames are BGR but InceptionV3 is trained on RGB images,
    # so convert before resizing.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = cv2.resize(rgb, (299, 299)).astype(np.float32)
    img = np.expand_dims(img, axis=0)
    # Fix: InceptionV3 expects inputs scaled to [-1, 1]; use the official
    # preprocess_input instead of dividing by 255, matching predict() above.
    img = preprocess_input(img)
    prediction = model.predict(img)
    pred_text = tf.keras.applications.inception_v3.decode_predictions(prediction, top=1)
    # Fix: initialize so an empty decode result can't raise NameError.
    pred_class = ''
    for (i, (imagenetID, label, prob)) in enumerate(pred_text[0]):
        pred_class = label
    return pred_class
def obj_det(search, frame, model):
    """Display *frame* in the Streamlit page, captioned with its predicted
    class, when that prediction contains *search* (case-insensitive via the
    lowered label); otherwise do nothing.

    Args:
        search: substring to look for in the predicted label (expected lowercase).
        frame: BGR image array from cv2.VideoCapture.read().
        model: the InceptionV3 model passed through to secpred().
    """
    predicted = secpred(frame, model).lower()
    if search in predicted:
        st.image(frame, caption=predicted)
def main():
    """Streamlit entry point.

    Lets the user upload a video, then either classify every frame
    ("Detect") or display only the frames whose predicted class matches a
    search term ("Search for an object").
    """
    import os  # local import: only needed here, to create the frames directory

    st.title('Object Detection 1.0')
    vid_file = st.file_uploader("Upload Video", type=['mp4', 'mkv', 'avi'])
    if vid_file is not None:
        # Persist the upload to disk so OpenCV can open it by path.
        path = vid_file.name
        with open(path, mode="wb") as f:
            f.write(vid_file.read())
        st.success("File Saved")
        # Fix: cv2.imwrite fails silently when the target directory is
        # missing, so create ./frames up front.
        os.makedirs('./frames', exist_ok=True)
        cap = cv2.VideoCapture(path)
        i = 0
        if st.button("Detect"):
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                # Fix: removed the stray space from the filename ('fr0 .jpg').
                path2 = './frames/fr' + str(i) + '.jpg'
                cv2.imwrite(path2, frame)
                img = image.load_img(path2, target_size=(299, 299))
                predict(img, model)
                i += 1
            cap.release()
            cv2.destroyAllWindows()
        key = st.text_input('Search')
        key = key.lower()
        # Fix: st.text_input returns '' (never None), so the old
        # `if key is not None` was always true; require a non-empty term.
        if key:
            if st.button("Search for an object"):
                # Fix: re-open the capture — the Detect branch may have
                # already consumed and released it in this same script run.
                cap = cv2.VideoCapture(path)
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break
                    obj_det(key, frame, model)
                cap.release()
                cv2.destroyAllWindows()
# Script entry point. NOTE: the footer markdown below sits OUTSIDE this
# guard, so it executes on import as well — under Streamlit the whole
# script is re-run per interaction, so in practice it always renders.
if __name__ == '__main__':
    main()
#st.header("")
# NOTE(review): this CSS defines .big-font, but the credit line below uses
# class "small-font", so this style is never applied — confirm intent.
st.markdown("""
<style>
.big-font {
font-size:300px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="small-font">created by Russell and Brenda</p>', unsafe_allow_html=True)