"""Webcam face-recognition demo.

Loads a reference photo, encodes it into a 128-d face embedding, then
streams frames from the default webcam: every face detected in a frame is
re-encoded and compared against the reference; matches are drawn with a
red labelled box. Press "q" to quit.
"""
import sys

import cv2
import face_recognition

# Load the reference image and encode it into a feature vector.
image_to_be_matched = face_recognition.load_image_file('my_image.jpg')
name = "Kyohoon Sim"

# Guard the [0] index: an image with no detectable face yields an empty
# list and the original code crashed with IndexError here.
reference_encodings = face_recognition.face_encodings(image_to_be_matched)
if not reference_encodings:
    print("No face found in reference image")
    sys.exit(1)
image_to_be_matched_encoded = reference_encodings[0]
print(image_to_be_matched_encoded)

# Open the webcam.
webcam = cv2.VideoCapture(0)

if not webcam.isOpened():
    print("Could not open webcam")
    sys.exit(1)

try:
    # Loop through frames until the stream ends or the user quits.
    while True:
        status, frame = webcam.read()

        if not status:
            print("Could not read frame")
            # break (not exit()) so the finally block can release the camera.
            break

        # face_locations = face_recognition.face_locations(frame)  # HoG-based face detector
        # CNN-based face detector: slower but more accurate than HoG.
        face_locations = face_recognition.face_locations(
            frame, number_of_times_to_upsample=0, model="cnn")

        for top, right, bottom, left in face_locations:
            # Crop the detected face region out of the frame.
            face_image = frame[top:bottom, left:right]

            # The crop may contain no re-detectable face; the original hid
            # this (and every other error) behind a bare except/pass.
            # Test the one expected failure explicitly instead.
            face_encodings = face_recognition.face_encodings(face_image)
            if not face_encodings:
                continue

            # Third positional argument is the match tolerance (lower = stricter).
            result = face_recognition.compare_faces(
                [image_to_be_matched_encoded], face_encodings[0], 0.5)

            if result[0]:
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Place the label above the box, or below when too close to the top edge.
                y = top - 10 if top - 10 > 10 else top + 10
                cv2.putText(frame, name, (left, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # Display output.
        cv2.imshow("detect me", frame)

        # Press "q" to stop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Always release the camera and close windows, even on error paths —
    # the original exit() calls inside the loop leaked both.
    webcam.release()
    cv2.destroyAllWindows()