-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmergin_motion_Detection.py
More file actions
88 lines (65 loc) · 2.37 KB
/
mergin_motion_Detection.py
File metadata and controls
88 lines (65 loc) · 2.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import cv2, time
import pandas as pd
from datetime import datetime

# Haar cascade for frontal-face detection; the XML file must sit next to this script.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Capture from the default webcam (index 0).
video = cv2.VideoCapture(0)
# video=cv2.VideoCapture(0,cv2.CAP_DSHOW)
# video=cv2.VideoCapture("people1.mp4")
video.read()        # discard the first (often under-exposed) frame
time.sleep(2)       # give the camera sensor time to warm up / auto-adjust

times = []                  # alternating motion start/end timestamps
status_list = [None, None]  # per-frame motion flags (0 = still, 1 = motion)
first_frame = None          # reference background frame for differencing

while True:
    check, frame = video.read()
    if not check or frame is None:
        # Camera disconnected or video ended — stop cleanly instead of
        # crashing inside cvtColor on a None frame.
        break
    status = 0

    # Grayscale + heavy Gaussian blur suppresses sensor noise before differencing.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if first_frame is None:
        # The first smoothed frame becomes the static background reference.
        first_frame = gray
        continue

    # Absolute difference between background and current frame highlights
    # the pixels that changed, i.e. the moving regions.
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    # Dilation fills small holes and smooths the thresholded blobs so that
    # contour detection sees solid shapes.
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

    # findContours returns (contours, hierarchy) on OpenCV >= 4 but
    # (image, contours, hierarchy) on OpenCV 3 — handle both layouts.
    found = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = found[0] if len(found) == 2 else found[1]

    for contour in cnts:
        if cv2.contourArea(contour) < 10000:
            continue    # ignore small blobs (noise, flicker)
        status = 1
        # Motion detected: look for faces and box each one.
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5)
        # detectMultiScale returns an array of (x, y, w, h) rectangles;
        # the original passed it to cv2.boundingRect, which is wrong —
        # iterate and draw each face rectangle instead.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

    status_list.append(status)
    # Record a timestamp each time the motion flag toggles:
    # 0 -> 1 marks the start of a motion interval, 1 -> 0 marks the end.
    if status_list[-1] == 1 and status_list[-2] == 0:
        times.append(datetime.now())
    if status_list[-1] == 0 and status_list[-2] == 1:
        times.append(datetime.now())

    cv2.imshow('Color Frame', gray)
    cv2.imshow("delta", delta_frame)
    cv2.imshow("Threshold frame", thresh_frame)

    key = cv2.waitKey(1)
    if key == ord('q'):
        if status == 1:
            # Close the currently-open motion interval before quitting.
            times.append(datetime.now())
        break

print(status_list)
print(times)
# print(gray)

# Pair timestamps into (Start, End) rows. Rows are collected first and the
# DataFrame built once, because DataFrame.append was removed in pandas 2.0.
# The len(times) - 1 bound also guards against an unmatched trailing start.
rows = [{"Start": times[i], "End": times[i + 1]}
        for i in range(0, len(times) - 1, 2)]
df = pd.DataFrame(rows, columns=["Start", "End"])
df.to_csv("Times.csv")

video.release()           # bug fix: original had `video.release` without parentheses
cv2.destroyAllWindows()   # bug fix: same — the method was never actually called