From f282f701a7abdf3185655e37b3eaba9d37888156 Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 7 Nov 2025 14:46:41 +0100 Subject: [PATCH 1/5] fix: improve camera cleanup and reinitialization process --- vilib.egg-info/PKG-INFO | 80 +++ vilib.egg-info/SOURCES.txt | 20 + vilib.egg-info/dependency_links.txt | 1 + vilib.egg-info/top_level.txt | 1 + vilib/camera_close_fix.txt | 21 + vilib/vilib.py | 18 +- vilib/vilib.py.backup | 818 ++++++++++++++++++++++++++++ 7 files changed, 958 insertions(+), 1 deletion(-) create mode 100644 vilib.egg-info/PKG-INFO create mode 100644 vilib.egg-info/SOURCES.txt create mode 100644 vilib.egg-info/dependency_links.txt create mode 100644 vilib.egg-info/top_level.txt create mode 100644 vilib/camera_close_fix.txt create mode 100644 vilib/vilib.py.backup diff --git a/vilib.egg-info/PKG-INFO b/vilib.egg-info/PKG-INFO new file mode 100644 index 0000000..30e16ce --- /dev/null +++ b/vilib.egg-info/PKG-INFO @@ -0,0 +1,80 @@ +Metadata-Version: 2.4 +Name: vilib +Version: 0.3.18 +Summary: Vision Library for Raspberry Pi +Author-email: SunFounder +Project-URL: Homepage, https://github.com/sunfounder/vilib +Project-URL: Bug Tracker, https://github.com/sunfounder/vilib/issues +Keywords: vilib,sunfounder,opencv,image process,visual process,sunfounder +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +Dynamic: license-file + + +# vilib -- Vision Library for Raspberry Pi + +Image visual processing library with multiple functions such as color recognition, face recognition, hands detection, image_classification, objects_detection, Wireless video transmission, etc... 
+ +## Quick Links + +- [Links](#links) +- [Install](#install) +- [Usage](#usage) +- [Update](#update) +- [About SunFounder](#about-sunfounder) +- [Contact us](#contact-us) + +## Docs + + + +## Links + +- our official website: +- picamera2: +- picamera2-manual: +- geoogle mediapipe: + +## Install + +Download this repository to your Raspberry Pi: + +```bash +cd ~ +git clone -b picamera2 https://github.com/sunfounder/vilib.git --depth 1 +``` + +Install + +```bash +cd ~/vilib +sudo python3 install.py +``` + +## Usage + +```bash +cd ~/vilib/examples +sudo python3 xxx.py +``` + +Stop running the example by using Ctrl+C + +## Update + +- [CHANGELOG] + +## About SunFounder + +SunFounder is a technology company focused on Raspberry Pi and Arduino open source community development. Committed to the promotion of open source culture, we strives to bring the fun of electronics making to people all around the world and enable everyone to be a maker. Our products include learning kits, development boards, robots, sensor modules and development tools. In addition to high quality products, SunFounder also offers video tutorials to help you make your own project. If you have interest in open source or making something cool, welcome to join us! 
+ +## Contact us + +E-mail: + + +[CHANGELOG]:https://github.com/sunfounder/vilib/blob/master/CHANGELOG.md diff --git a/vilib.egg-info/SOURCES.txt b/vilib.egg-info/SOURCES.txt new file mode 100644 index 0000000..54a67c8 --- /dev/null +++ b/vilib.egg-info/SOURCES.txt @@ -0,0 +1,20 @@ +LICENSE +README.md +pyproject.toml +vilib/__init__.py +vilib/color_detection.py +vilib/face_detection.py +vilib/hands_detection.py +vilib/image_classification.py +vilib/mediapipe_object_detection.py +vilib/objects_detection.py +vilib/pose_detection.py +vilib/qrcode_recognition.py +vilib/traffic_sign_detection.py +vilib/utils.py +vilib/version.py +vilib/vilib.py +vilib.egg-info/PKG-INFO +vilib.egg-info/SOURCES.txt +vilib.egg-info/dependency_links.txt +vilib.egg-info/top_level.txt \ No newline at end of file diff --git a/vilib.egg-info/dependency_links.txt b/vilib.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/vilib.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/vilib.egg-info/top_level.txt b/vilib.egg-info/top_level.txt new file mode 100644 index 0000000..9a425de --- /dev/null +++ b/vilib.egg-info/top_level.txt @@ -0,0 +1 @@ +vilib diff --git a/vilib/camera_close_fix.txt b/vilib/camera_close_fix.txt new file mode 100644 index 0000000..1056e3c --- /dev/null +++ b/vilib/camera_close_fix.txt @@ -0,0 +1,21 @@ + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + Vilib.picam2 = Picamera2() + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + Vilib.picam2 = Picamera2() + + # Reset thread reference + Vilib.camera_thread = None diff --git a/vilib/vilib.py b/vilib/vilib.py index 
5144f10..cc552f1 100644 --- a/vilib/vilib.py +++ b/vilib/vilib.py @@ -379,7 +379,23 @@ def camera_close(): if Vilib.camera_thread != None: Vilib.camera_run = False time.sleep(0.1) - + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + Vilib.picam2 = Picamera2() + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + Vilib.picam2 = Picamera2() + + # Reset thread reference + Vilib.camera_thread = None @staticmethod def display(local=True, web=True): # cheack camera thread is_alive diff --git a/vilib/vilib.py.backup b/vilib/vilib.py.backup new file mode 100644 index 0000000..5144f10 --- /dev/null +++ b/vilib/vilib.py.backup @@ -0,0 +1,818 @@ +#!/usr/bin/env python3 + +# whther print welcome message +import os +import logging + +from .version import __version__ +if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ + 'False', '0' +]: + from pkg_resources import require + picamera2_version = require('picamera2')[0].version + print(f'vilib {__version__} launching ...') + print(f'picamera2 {picamera2_version}') + +# set libcamera2 log level +os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' +from picamera2 import Picamera2 +import libcamera + +import cv2 +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +from flask import Flask, render_template, Response + +import time +import datetime +import threading +from multiprocessing import Process, Manager + +from .utils import * + +# user and user home directory +# ================================================================= +user = os.popen("echo ${SUDO_USER:-$(who -m | awk '{ print $1 }')}").readline().strip() +user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() +# print(f"user: {user}") +# print(f"user_home: 
{user_home}") + +# Default path for pictures and videos +DEFAULLT_PICTURES_PATH = '%s/Pictures/vilib/'%user_home +DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home + +# utils +# ================================================================= +def findContours(img): + _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + return contours, hierarchy + +# flask +# ================================================================= +os.environ['FLASK_DEBUG'] = 'development' +app = Flask(__name__) + +log = logging.getLogger('werkzeug') +log.setLevel(logging.ERROR) + +@app.route('/') +def index(): + """Video streaming home page.""" + return render_template('index.html') + +def get_frame(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_qrcode_pictrue(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_png_frame(): + return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() + +def get_qrcode(): + while Vilib.qrcode_img_encode is None: + time.sleep(0.2) + + return Vilib.qrcode_img_encode + +def gen(): + """Video streaming generator function.""" + while True: + # start_time = time.time() + frame = get_frame() + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + time.sleep(0.03) + # end_time = time.time() - start_time + # print('flask fps:%s'%int(1/end_time)) + +@app.route('/mjpg') ## video +def video_feed(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_display_flag: + response = Response(gen(), + mimetype='multipart/x-mixed-replace; boundary=frame') + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +@app.route('/mjpg.jpg') # jpg +def video_feed_jpg(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_frame(), mimetype="image/jpeg") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route('/mjpg.png') # png +def video_feed_png(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_png_frame(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route("/qrcode") +def qrcode_feed(): + qrcode_html = ''' + + + + QRcode + + + + QR Code + + +''' + return Response(qrcode_html, mimetype='text/html') + + +@app.route("/qrcode.png") +def qrcode_feed_png(): + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_qrcode_flag: + # response = Response(get_qrcode(), + # mimetype='multipart/x-mixed-replace; boundary=frame') + response = Response(get_qrcode(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display_qrcode(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +def web_camera_start(): + try: + Vilib.flask_start = True + app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) + except Exception as e: + print(e) + +# Vilib +# ================================================================= +class Vilib(object): + + picam2 = Picamera2() + + camera_size = (640, 480) + camera_width = 640 + camera_height = 480 + camera_vflip = False + camera_hflip = False + camera_run = False + + flask_thread = None + camera_thread = None + flask_start = False + + qrcode_display_thread = None + qrcode_making_completed = False + qrcode_img = Manager().list(range(1)) + qrcode_img_encode = None + qrcode_win_name = 'qrcode' + + img = Manager().list(range(1)) + flask_img = Manager().list(range(1)) + + Windows_Name = "picamera" + imshow_flag = False + web_display_flag = False + imshow_qrcode_flag = False + web_qrcode_flag = False + + draw_fps = False + fps_origin = (camera_width-105, 20) + fps_size = 0.6 + fps_color = (255, 255, 255) + + detect_obj_parameter = {} + color_detect_color = None + face_detect_sw = False + hands_detect_sw = False + pose_detect_sw = False + image_classify_sw = False + image_classification_model = None + image_classification_labels = None + objects_detect_sw = False + objects_detection_model = None + objects_detection_labels = None + qrcode_detect_sw = False + traffic_detect_sw = False + + @staticmethod + def get_instance(): + return Vilib.picam2 + + @staticmethod + def set_controls(controls): + Vilib.picam2.set_controls(controls) + + @staticmethod + def get_controls(): + return Vilib.picam2.capture_metadata() + + @staticmethod + def camera(): + Vilib.camera_width = Vilib.camera_size[0] + Vilib.camera_height = Vilib.camera_size[1] + + picam2 = Vilib.picam2 + + preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = Vilib.camera_size + preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 
'YUV420' + preview_config.transform = libcamera.Transform( + hflip=Vilib.camera_hflip, + vflip=Vilib.camera_vflip + ) + preview_config.colour_space = libcamera.ColorSpace.Sycc() + preview_config.buffer_count = 4 + preview_config.queue = True + # preview_config.raw = {'size': (2304, 1296)} + preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time + + try: + picam2.start() + except Exception as e: + print(f"\033[38;5;1mError:\033[0m\n{e}") + print("\nPlease check whether the camera is connected well" +\ + "You can use the \"libcamea-hello\" command to test the camera" + ) + exit(1) + Vilib.camera_run = True + Vilib.fps_origin = (Vilib.camera_width-105, 20) + fps = 0 + start_time = 0 + framecount = 0 + try: + start_time = time.time() + while True: + # ----------- extract image data ---------------- + # st = time.time() + Vilib.img = picam2.capture_array() + # print(f'picam2.capture_array(): {time.time() - st:.6f}') + # st = time.time() + + # ----------- image gains and effects ---------------- + + # ----------- image detection and recognition ---------------- + Vilib.img = Vilib.color_detect_func(Vilib.img) + Vilib.img = Vilib.face_detect_func(Vilib.img) + Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) + Vilib.img = Vilib.qrcode_detect_func(Vilib.img) + + Vilib.img = Vilib.image_classify_fuc(Vilib.img) + Vilib.img = Vilib.object_detect_fuc(Vilib.img) + Vilib.img = Vilib.hands_detect_fuc(Vilib.img) + Vilib.img = Vilib.pose_detect_fuc(Vilib.img) + + # ----------- calculate fps and draw fps ---------------- + # calculate fps + framecount += 1 + elapsed_time = float(time.time() - start_time) + if (elapsed_time > 1): + fps = round(framecount/elapsed_time, 1) + framecount = 0 + start_time = time.time() + + # print(f"elapsed_time: {elapsed_time}, fps: {fps}") + + # draw fps + if Vilib.draw_fps: + cv2.putText( + # img, # image + Vilib.img, + f"FPS: {fps}", # text + Vilib.fps_origin, # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + Vilib.fps_size, 
# font_scale + Vilib.fps_color, # font_color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + # ---- copy img for flask --- + # st = time.time() + Vilib.flask_img = Vilib.img + # print(f'vilib.flask_img: {time.time() - st:.6f}') + + # ----------- display on desktop ---------------- + if Vilib.imshow_flag == True: + try: + try: + prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) + qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) + if prop < 1 or qrcode_prop < 1: + break + except: + pass + + cv2.imshow(Vilib.Windows_Name, Vilib.img) + + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: + Vilib.qrcode_making_completed = False + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + + cv2.waitKey(1) + + except Exception as e: + Vilib.imshow_flag = False + print(f"imshow failed:\n {e}") + break + + # ----------- exit ---------------- + if Vilib.camera_run == False: + break + + # print(f'loop end: {time.time() - st:.6f}') + + except KeyboardInterrupt as e: + print(e) + finally: + picam2.close() + cv2.destroyAllWindows() + + @staticmethod + def camera_start(vflip=False, hflip=False, size=None): + if size is not None: + Vilib.camera_size = size + Vilib.camera_hflip = hflip + Vilib.camera_vflip = vflip + Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") + Vilib.camera_thread.daemon = False + Vilib.camera_thread.start() + while not Vilib.camera_run: + time.sleep(0.1) + + @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + + @staticmethod + def display(local=True, web=True): + # cheack camera thread is_alive + if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_flag = True + print("Imgshow start ...") + else: + Vilib.imshow_flag = False + print("Local display failed, because there is no 
gui.") + # web video + if web == True: + Vilib.web_display_flag = True + print("\nWeb display on:") + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/mjpg") + if eth0 != None: + print(f" http://{eth0}:9000/mjpg") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + else: + print('Error: Please execute < camera_start() > first.') + + @staticmethod + def show_fps(color=None, fps_size=None, fps_origin=None): + if color is not None: + Vilib.fps_color = color + if fps_size is not None: + Vilib.fps_size = fps_size + if fps_origin is not None: + Vilib.fps_origin = fps_origin + + Vilib.draw_fps = True + + @staticmethod + def hide_fps(): + Vilib.draw_fps = False + + # take photo + # ================================================================= + @staticmethod + def take_photo(photo_name, path=DEFAULLT_PICTURES_PATH): + # ----- check path ----- + if not os.path.exists(path): + # print('Path does not exist. Creating path now ... ') + os.makedirs(name=path, mode=0o751, exist_ok=True) + time.sleep(0.01) + # ----- save photo ----- + status = False + for _ in range(5): + if Vilib.img is not None: + status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) + break + else: + time.sleep(0.01) + else: + status = False + + # if status: + # print('The photo is saved as '+path+'/'+photo_name+'.jpg') + # else: + # print('Photo save failed .. 
') + + return status + + + # record video + # ================================================================= + rec_video_set = {} + + rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') + #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") + + rec_video_set["fps"] = 30.0 + rec_video_set["framesize"] = (640, 480) + rec_video_set["isColor"] = True + + rec_video_set["name"] = "default" + rec_video_set["path"] = DEFAULLT_VIDEOS_PATH + + rec_video_set["start_flag"] = False + rec_video_set["stop_flag"] = False + + rec_thread = None + + @staticmethod + def rec_video_work(): + if not os.path.exists(Vilib.rec_video_set["path"]): + # print('Path does not exist. Creating path now ... ') + os.makedirs(name=Vilib.rec_video_set["path"], + mode=0o751, + exist_ok=True + ) + time.sleep(0.01) + video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', + Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], + Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) + + while True: + if Vilib.rec_video_set["start_flag"] == True: + # video_out.write(Vilib.img_array[0]) + video_out.write(Vilib.img) + if Vilib.rec_video_set["stop_flag"] == True: + video_out.release() # note need to release the video writer + Vilib.rec_video_set["start_flag"] == False + break + + @staticmethod + def rec_video_run(): + if Vilib.rec_thread != None: + Vilib.rec_video_stop() + Vilib.rec_video_set["stop_flag"] = False + Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) + Vilib.rec_thread.daemon = True + Vilib.rec_thread.start() + + @staticmethod + def rec_video_start(): + Vilib.rec_video_set["start_flag"] = True + Vilib.rec_video_set["stop_flag"] = False + + @staticmethod + def rec_video_pause(): + Vilib.rec_video_set["start_flag"] = False + + @staticmethod + def rec_video_stop(): + Vilib.rec_video_set["start_flag"] == False + Vilib.rec_video_set["stop_flag"] = True + if Vilib.rec_thread != None: + 
Vilib.rec_thread.join(3) + Vilib.rec_thread = None + + # color detection + # ================================================================= + @staticmethod + def color_detect(color="red"): + ''' + :param color: could be red, green, blue, yellow , orange, purple + ''' + Vilib.color_detect_color = color + from .color_detection import color_detect_work, color_obj_parameter + Vilib.color_detect_work = color_detect_work + Vilib.color_obj_parameter = color_obj_parameter + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + + @staticmethod + def color_detect_func(img): + if Vilib.color_detect_color is not None \ + and Vilib.color_detect_color != 'close' \ + and hasattr(Vilib, "color_detect_work"): + img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + return img + + @staticmethod + def close_color_detection(): + Vilib.color_detect_color = None + + # face detection + # ================================================================= + @staticmethod + def face_detect_switch(flag=False): + Vilib.face_detect_sw = flag + if Vilib.face_detect_sw: + from .face_detection import face_detect, set_face_detection_model, face_obj_parameter + Vilib.face_detect_work = face_detect + Vilib.set_face_detection_model = set_face_detection_model + Vilib.face_obj_parameter = 
face_obj_parameter + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + + @staticmethod + def face_detect_func(img): + if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): + img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + return img + + # traffic sign detection + # ================================================================= + @staticmethod + def traffic_detect_switch(flag=False): + Vilib.traffic_detect_sw = flag + if Vilib.traffic_detect_sw: + from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter + Vilib.traffic_detect_work = traffic_sign_detect + Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + + @staticmethod + def traffic_detect_fuc(img): + if Vilib.traffic_detect_sw and hasattr(Vilib, 
"traffic_detect_work"): + img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + return img + + # qrcode recognition + # ================================================================= + @staticmethod + def qrcode_detect_switch(flag=False): + Vilib.qrcode_detect_sw = flag + if Vilib.qrcode_detect_sw: + from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter + Vilib.qrcode_recognize = qrcode_recognize + Vilib.qrcode_obj_parameter = qrcode_obj_parameter + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] + + @staticmethod + def qrcode_detect_func(img): + if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): + img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + return img + + 
# qrcode making + # ================================================================= + @staticmethod + def make_qrcode(data, + path=None, + version=1, + box_size=10, + border=4, + fill_color=(132, 112, 255), + back_color=(255, 255, 255) + ): + import qrcode # https://github.com/lincolnloop/python-qrcode + + qr = qrcode.QRCode( + version=version, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=box_size, + border=border, + ) + qr.add_data(data) + qr.make(fit=True) + qr_pil = qr.make_image(fill_color=fill_color, + back_color=back_color) + if path != None: + qr_pil.save(path) + + Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), cv2.COLOR_RGB2BGR) + Vilib.qrcode_making_completed = True + + if Vilib.web_qrcode_flag: + Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() + + + + @staticmethod + def display_qrcode_work(): + while True: + if Vilib.imshow_flag: + time.sleep(0.1) + continue + + # ----------- display qrcode on desktop ---------------- + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : + Vilib.qrcode_making_completed = False + try: + if len(Vilib.qrcode_img) > 10: + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + cv2.waitKey(1) + if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: + cv2.destroyWindow(Vilib.qrcode_win_name) + except Exception as e: + Vilib.imshow_qrcode_flag = False + print(f"imshow qrcode failed:\n {e}") + break + time.sleep(0.1) + + @staticmethod + def display_qrcode(local=True, web=True): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_qrcode_flag = True + print("Imgshow qrcode start ...") + else: + Vilib.imshow_qrcode_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_qrcode_flag = True + print(f'QRcode display on:') + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/qrcode") + if eth0 != None: + print(f" 
http://{eth0}:9000/qrcode") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + + if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: + Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) + Vilib.qrcode_display_thread.daemon = True + Vilib.qrcode_display_thread.start() + + + # image classification + # ================================================================= + @staticmethod + def image_classify_switch(flag=False): + from .image_classification import image_classification_obj_parameter + Vilib.image_classify_sw = flag + Vilib.image_classification_obj_parameter = image_classification_obj_parameter + + @staticmethod + def image_classify_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.image_classification_model = path + + @staticmethod + def image_classify_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.image_classification_labels = path + + @staticmethod + def image_classify_fuc(img): + if Vilib.image_classify_sw == True: + # print('classify_image starting') + from .image_classification import classify_image + img = classify_image(image=img, + model=Vilib.image_classification_model, + labels=Vilib.image_classification_labels) + return img + + # objects detection + # ================================================================= + @staticmethod + def object_detect_switch(flag=False): + Vilib.objects_detect_sw = flag + if Vilib.objects_detect_sw == True: + from .objects_detection import object_detection_list_parameter + Vilib.object_detection_list_parameter = object_detection_list_parameter + 
+ @staticmethod + def object_detect_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.objects_detection_model = path + + @staticmethod + def object_detect_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.objects_detection_labels = path + + @staticmethod + def object_detect_fuc(img): + if Vilib.objects_detect_sw == True: + # print('detect_objects starting') + from .objects_detection import detect_objects + img = detect_objects(image=img, + model=Vilib.objects_detection_model, + labels=Vilib.objects_detection_labels) + return img + + # hands detection + # ================================================================= + @staticmethod + def hands_detect_switch(flag=False): + from .hands_detection import DetectHands + Vilib.detect_hands = DetectHands() + Vilib.hands_detect_sw = flag + + @staticmethod + def hands_detect_fuc(img): + if Vilib.hands_detect_sw == True: + img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) + return img + + # pose detection + # ================================================================= + @staticmethod + def pose_detect_switch(flag=False): + from .pose_detection import DetectPose + Vilib.pose_detect = DetectPose() + Vilib.pose_detect_sw = flag + + @staticmethod + def pose_detect_fuc(img): + if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): + img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) + return img From c1e6c81191020908aa927bbee37c608e88e53f77 Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 7 Nov 2025 15:04:00 +0100 Subject: [PATCH 2/5] Implement comprehensive fixes for camera threading issues in Vilib - Enhanced the `camera_close` method to ensure proper closure and reinitialization of the Picamera2 object, including improved error handling and timeout adjustments. 
- Updated the `camera` method to ensure robust configuration handling, including checks for existing configurations and error handling during setup. --- =3.20.0 | 1 + build/lib/vilib/__init__.py | 5 + build/lib/vilib/color_detection.py | 165 ++++ build/lib/vilib/comprehensive_fix.py | 84 ++ build/lib/vilib/face_detection.py | 131 +++ build/lib/vilib/hands_detection.py | 43 + build/lib/vilib/image_classification.py | 213 +++++ build/lib/vilib/mediapipe_object_detection.py | 137 +++ build/lib/vilib/objects_detection.py | 242 +++++ build/lib/vilib/pose_detection.py | 39 + build/lib/vilib/qrcode_recognition.py | 90 ++ build/lib/vilib/traffic_sign_detection.py | 341 +++++++ build/lib/vilib/utils.py | 47 + build/lib/vilib/version.py | 1 + build/lib/vilib/vilib.py | 856 ++++++++++++++++++ build/lib/vilib/vilib_debug.py | 846 +++++++++++++++++ vilib.egg-info/PKG-INFO | 80 -- vilib.egg-info/SOURCES.txt | 20 - vilib.egg-info/dependency_links.txt | 1 - vilib.egg-info/top_level.txt | 1 - vilib/camera_close_fix_final.txt | 33 + vilib/camera_close_fix_v2.txt | 30 + vilib/comprehensive_fix.py | 84 ++ vilib/vilib.py | 36 +- vilib/vilib_debug.py | 846 +++++++++++++++++ 25 files changed, 4263 insertions(+), 109 deletions(-) create mode 100644 =3.20.0 create mode 100644 build/lib/vilib/__init__.py create mode 100644 build/lib/vilib/color_detection.py create mode 100644 build/lib/vilib/comprehensive_fix.py create mode 100644 build/lib/vilib/face_detection.py create mode 100644 build/lib/vilib/hands_detection.py create mode 100644 build/lib/vilib/image_classification.py create mode 100644 build/lib/vilib/mediapipe_object_detection.py create mode 100644 build/lib/vilib/objects_detection.py create mode 100644 build/lib/vilib/pose_detection.py create mode 100644 build/lib/vilib/qrcode_recognition.py create mode 100644 build/lib/vilib/traffic_sign_detection.py create mode 100644 build/lib/vilib/utils.py create mode 100644 build/lib/vilib/version.py create mode 100644 
build/lib/vilib/vilib.py create mode 100644 build/lib/vilib/vilib_debug.py delete mode 100644 vilib.egg-info/PKG-INFO delete mode 100644 vilib.egg-info/SOURCES.txt delete mode 100644 vilib.egg-info/dependency_links.txt delete mode 100644 vilib.egg-info/top_level.txt create mode 100644 vilib/camera_close_fix_final.txt create mode 100644 vilib/camera_close_fix_v2.txt create mode 100644 vilib/comprehensive_fix.py create mode 100644 vilib/vilib_debug.py diff --git a/=3.20.0 b/=3.20.0 new file mode 100644 index 0000000..928e227 --- /dev/null +++ b/=3.20.0 @@ -0,0 +1 @@ +Requirement already satisfied: protobuf in /usr/local/lib/python3.13/dist-packages (6.33.0) diff --git a/build/lib/vilib/__init__.py b/build/lib/vilib/__init__.py new file mode 100644 index 0000000..2a06086 --- /dev/null +++ b/build/lib/vilib/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 +from .vilib import Vilib +from .version import __version__ + + diff --git a/build/lib/vilib/color_detection.py b/build/lib/vilib/color_detection.py new file mode 100644 index 0000000..533d718 --- /dev/null +++ b/build/lib/vilib/color_detection.py @@ -0,0 +1,165 @@ +import cv2 +import numpy as np + + +'''The range of H, S, V in HSV space for colors''' +# You can run ../examples/hsv_threshold_analyzer.py to analyze and adjust these values +color_dict = { + 'red':[[0, 8], [80, 255], [0, 255]], + 'orange':[[12, 18], [80, 255], [80, 255]], + 'yellow':[[20, 60], [60, 255], [120, 255]], + 'green':[[45, 85], [120, 255], [80, 255]], + 'blue':[[92,120], [120, 255], [80, 255]], + 'purple':[[115,155], [30, 255], [60, 255]], + 'magenta':[[160,180], [30, 255], [60, 255]], + } + +'''Define parameters for color detection object''' +color_obj_parameter = {} + +color_obj_parameter['color'] = 'red' # color to be detected + +color_obj_parameter['x'] = 320 # the largest color block center x-axis coordinate +color_obj_parameter['y'] = 240 # the largest color block center y-axis coordinate +color_obj_parameter['w'] = 0 # the largest 
color block pixel width +color_obj_parameter['h'] = 0 # the largest color block pixel height +color_obj_parameter['n'] = 0 # Number of color blocks detected + + +def color_detect_work(img, width, height, color_name, rectangle_color=(0, 0, 255)): + ''' + Color detection with opencv + + :param img: The detected image data + :type img: list + :param width: The width of the image data + :type width: int + :param height: The height of the image data + :type height: int + :param color_name: The name of the color to be detected. Eg: "red". For supported colors, please see [color_dict]. + :type color_name: str + :param rectangle_color: The color (BGR, tuple) of rectangle. Eg: (0, 0, 255). + :type color_name: tuple + :returns: The image returned after detection. + :rtype: Binary list + ''' + color_obj_parameter['color'] = color_name + + # Reduce image for faster recognition + zoom = 4 # reduction ratio + width_zoom = int(width / zoom) + height_zoom = int(height / zoom) + resize_img = cv2.resize(img, (width_zoom, height_zoom), interpolation=cv2.INTER_LINEAR) + + # Convert the image in BGR to HSV + hsv = cv2.cvtColor(resize_img, cv2.COLOR_BGR2HSV) + + # Set range for red color and define mask + # color_lower = np.array([min(color_dict[color_name]), 60, 60]) + # color_upper = np.array([max(color_dict[color_name]), 255, 255]) + color_lower = np.array([min(color_dict[color_name][0]), min(color_dict[color_name][1]), min(color_dict[color_name][2])]) + color_upper = np.array([max(color_dict[color_name][0]), max(color_dict[color_name][1]), max(color_dict[color_name][2])]) + + mask = cv2.inRange(hsv, color_lower, color_upper) + + if color_name == 'red': + mask_2 = cv2.inRange(hsv, (167, 0, 0), (180, 255, 255)) + mask = cv2.bitwise_or(mask, mask_2) + + # define a 5*5 kernel + kernel_5 = np.ones((5,5), np.uint8) + + # opening the image (erosion followed by dilation), to remove the image noise + open_img = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_5, iterations=1) + + # Find 
contours in binary image + _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + + color_obj_parameter['n'] = len(contours) + + if color_obj_parameter['n'] < 1: + color_obj_parameter['x'] = width/2 + color_obj_parameter['y'] = height/2 + color_obj_parameter['w'] = 0 + color_obj_parameter['h'] = 0 + color_obj_parameter['n'] = 0 + else: + # Iterate over all contours + max_area = 0 + for contour in contours: + # Return the coordinate(top left), width and height of contour + x, y, w, h = cv2.boundingRect(contour) + if w >= 8 and h >= 8: + x = x * zoom + y = y * zoom + w = w * zoom + h = h * zoom + # Draw rectangle around the color block + cv2.rectangle(img, # image + (x, y), # start position + (x+w, y+h), # end position + rectangle_color, # color + 2, # thickness + ) + # Draw color name + cv2.putText(img, # image + color_name, # text + (x, y-5), # start position + cv2.FONT_HERSHEY_SIMPLEX, # font + 0.72, # font size + rectangle_color, # color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + else: + continue + + # Save the attribute of the largest color block + object_area = w*h + if object_area > max_area: + max_area = object_area + color_obj_parameter['x'] = int(x + w/2) + color_obj_parameter['y'] = int(y + h/2) + color_obj_parameter['w'] = w + color_obj_parameter['h'] = h + + return img + +# Test +def test(color): + print("color detection: %s"%color) + + cap = cv2.VideoCapture(0) + cap.set(3, 640) + cap.set(4, 480) + + while cap.isOpened(): + success,frame = cap.read() + if not success: + print("Ignoring empty camera frame.") + # If loading a video, use 'break' instead of 'continue'. 
+ continue + + # frame = cv2.flip(frame, -1) # Flip camera vertically + + out_img = color_detect_work(frame, 640, 480, color) + + cv2.imshow('Color detecting ...', out_img) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit + break + if cv2.getWindowProperty('Color detecting ...', 1) < 0: + break + + cap.release() + cv2.destroyAllWindows() + +if __name__ == "__main__": + test('red') diff --git a/build/lib/vilib/comprehensive_fix.py b/build/lib/vilib/comprehensive_fix.py new file mode 100644 index 0000000..d23f0b3 --- /dev/null +++ b/build/lib/vilib/comprehensive_fix.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +""" +Comprehensive fix for vilib camera threading issues +This patches both camera_close and camera methods +""" + +def apply_comprehensive_fix(): + # Read the original file + with open('vilib.py', 'r') as f: + content = f.read() + + # 1. Fix camera_close method + old_camera_close = ''' @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1)''' + + new_camera_close = ''' @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.2) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=3.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.2) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + try: + Vilib.picam2 = Picamera2() + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None''' + + # 2. 
Fix camera method to be more robust + old_camera_start = ''' preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = Vilib.camera_size''' + + new_camera_start = ''' # Ensure we have a fresh configuration + try: + preview_config = picam2.preview_configuration + if preview_config is None: + # Create new configuration if needed + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + except Exception as e: + print(f"Error getting preview configuration: {e}") + # Try to create a new configuration + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + + # preview_config.size = (800, 600) + if preview_config is not None: + preview_config.size = Vilib.camera_size''' + + # Apply the fixes + content = content.replace(old_camera_close, new_camera_close) + content = content.replace(old_camera_start, new_camera_start) + + # Write the fixed file + with open('vilib.py', 'w') as f: + f.write(content) + + print("Applied comprehensive vilib fix!") + return True + +if __name__ == "__main__": + apply_comprehensive_fix() diff --git a/build/lib/vilib/face_detection.py b/build/lib/vilib/face_detection.py new file mode 100644 index 0000000..ccf7565 --- /dev/null +++ b/build/lib/vilib/face_detection.py @@ -0,0 +1,131 @@ +import cv2 +# https://github.com/opencv/opencv-python + +'''Define parameters for face detection object''' +# Default model path +face_model_path = '/opt/vilib/haarcascade_frontalface_default.xml' +# face_model_path = '/opt/vilib/haarcascade_profileface.xml' + +face_obj_parameter = {} +face_obj_parameter['x'] = 320 # the largest face block center x-axis coordinate +face_obj_parameter['y'] = 240 # the largest face block center y-axis coordinate +face_obj_parameter['w'] = 0 # the largest face block pixel width +face_obj_parameter['h'] = 0 # the largest face pixel height 
+face_obj_parameter['n'] = 0 # Number of faces detected + +face_cascade = None + +def set_face_detection_model(model_path): + ''' + Set face detection model path + + :param model_path: The path of face haar-cascade XML classifier file + :type model_path: str + ''' + global face_cascade, face_model_path + + face_model_path = model_path + face_cascade = cv2.CascadeClassifier(face_model_path) + + +def face_detect(img, width, height, rectangle_color=(255, 0, 0)): + ''' + Face detection with opencv + + :param img: The detected image data + :type img: list + :param width: The width of the image data + :type width: int + :param height: The height of the image data + :type height: int + :param rectangle_color: The color (BGR, tuple) of rectangle. Eg: (255, 0, 0). + :type color_name: tuple + :returns: The image returned after detection. + :rtype: Binary list + ''' + global face_cascade + # Reduce image for faster recognition + zoom = 2 + width_zoom = int(width / zoom) + height_zoom = int(height / zoom) + resize_img = cv2.resize(img, (width_zoom, height_zoom), interpolation=cv2.INTER_LINEAR) + + # Converting the image to grayscale + gray_img = cv2.cvtColor(resize_img, cv2.COLOR_BGR2GRAY) + + # Loading the haar-cascade XML classifier file + if face_cascade is None: + face_cascade = cv2.CascadeClassifier(face_model_path) + + # Applying the face detection method on the grayscale image + faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=3) + + face_obj_parameter['n'] = len(faces) + + # Iterating over all detected faces + if face_obj_parameter['n'] > 0: + max_area = 0 + for (x,y,w,h) in faces: + x = x * zoom + y = y * zoom + w = w * zoom + h = h * zoom + # Draw rectangle around the face + cv2.rectangle(img, (x, y), (x+w, y+h), rectangle_color, 2) + + # Save the attribute of the largest color block + object_area = w * h + if object_area > max_area: + max_area = object_area + face_obj_parameter['x'] = int(x + w/2) + face_obj_parameter['y'] = int(y + h/2) 
+ face_obj_parameter['w'] = w + face_obj_parameter['h'] = h + else: + face_obj_parameter['x'] = width/2 + face_obj_parameter['y'] = height/2 + face_obj_parameter['w'] = 0 + face_obj_parameter['h'] = 0 + face_obj_parameter['n'] = 0 + + return img + +# Test +def test(): + print("face detection ...") + + cap = cv2.VideoCapture(0) + cap.set(3, 640) + cap.set(4, 480) + + while cap.isOpened(): + success,frame = cap.read() + if not success: + print("Ignoring empty camera frame.") + # If loading a video, use 'break' instead of 'continue'. + continue + + # frame = cv2.flip(frame, -1) # Flip camera vertically + + out_img = face_detect(frame, 640, 480) + + cv2.imshow('Face detecting ...', out_img) + + # if cv2.waitKey(1) & 0xFF == ord('q'): + # break + # if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit + # break + # if cv2.getWindowProperty('Face detecting ...', 1) < 0: + # break + + key = cv2.waitKey(10) & 0xff + print(key) + + + cap.release() + cv2.destroyAllWindows() + +if __name__ == "__main__": + test() + + diff --git a/build/lib/vilib/hands_detection.py b/build/lib/vilib/hands_detection.py new file mode 100644 index 0000000..91e16ca --- /dev/null +++ b/build/lib/vilib/hands_detection.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +import cv2 +import mediapipe as mp +from ast import literal_eval + +mp_drawing = mp.solutions.drawing_utils +# mp_drawing_styles = mp.solutions.drawing_styles +mp_hands = mp.solutions.hands + +class DetectHands(): + def __init__(self): + self.hands = mp_hands.Hands(max_num_hands = 1, + min_detection_confidence=0.5, + min_tracking_confidence=0.5) + + def work(self,image): + joints = [] + if len(image) != 0: + # To improve performance, optionally mark the image as not writeable to + # pass by reference. + image.flags.writeable = False + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + results = self.hands.process(image) + + # Draw the hand annotations on the image. 
+ image.flags.writeable = True + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + if results.multi_hand_landmarks: + for hand_landmarks in results.multi_hand_landmarks: + mp_drawing.draw_landmarks( + image, + hand_landmarks, + mp_hands.HAND_CONNECTIONS,) + # mp_drawing_styles.get_default_hand_landmarks_style(), + # mp_drawing_styles.get_default_hand_connections_style()) + joints = str(results.multi_hand_landmarks).replace('\n','').replace(' ','').replace('landmark',',').replace(',','',1) + joints = joints.replace('{x:','[').replace('y:',',').replace('z:',',').replace('}',']') + try: + joints = literal_eval(joints) + except Exception as e: + raise(e) + return image,joints + diff --git a/build/lib/vilib/image_classification.py b/build/lib/vilib/image_classification.py new file mode 100644 index 0000000..fc07147 --- /dev/null +++ b/build/lib/vilib/image_classification.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import time +import os +import numpy as np + +import cv2 + +from tflite_runtime.interpreter import Interpreter +import threading + +from .utils import load_labels + +CAMERA_WIDTH = 640 +CAMERA_HEIGHT = 480 + +default_model = '/opt/vilib/mobilenet_v1_0.25_224_quant.tflite' +default_labels = '/opt/vilib/labels_mobilenet_quant_v1_224.txt' + +image_classification_obj_parameter = {} +image_classification_obj_parameter['name'] = "" # result +image_classification_obj_parameter['acc'] = 0 # accuracy + +def set_input_tensor(interpreter, image): + tensor_index = interpreter.get_input_details()[0]['index'] + input_tensor = interpreter.tensor(tensor_index)()[0] + input_tensor[:, :] = image + + +def __classify_image(interpreter, image, labels_map): + """Returns a sorted array of classification results.""" + set_input_tensor(interpreter, image) + interpreter.invoke() + output_details = interpreter.get_output_details()[0] + output = 
np.squeeze(interpreter.get_tensor(output_details['index'])) + + # If the model is quantized (uint8 data), then dequantize the results + if output_details['dtype'] == np.uint8: + scale, zero_point = output_details['quantization'] + output = scale * (output - zero_point) + + # for i,out in enumerate(output): + # print(labels_map[i],round(out,3)) + # print('> ',end=' ') + + # Sort the results + ordered = np.argpartition(-output, 1) + # Return the person with the highest score + return [(i, output[i]) for i in ordered[:1]] + + +results = [] +image = [] +elapsed_ms = 0 +run_flag = False + +def imgshow_fuc(input_height, input_width,labels): + + global results + global elapsed_ms + global image + global run_flag + + run_flag = True + + counter, fps = 0, 0 + start_time = time.time() + fps_avg_frame_count = 10 + + # open camera + cap = cv2.VideoCapture(0) + cap.set(3,CAMERA_WIDTH) + cap.set(4,CAMERA_HEIGHT) + print('start...') + + while cap.isOpened(): + + success,frame = cap.read() + if not success: + print("Ignoring empty camera frame.") + # If loading a video, use 'break' instead of 'continue'. 
+ continue + + + # frame = cv2.flip(frame, -1) # Flip camera vertically + image = cv2.resize(frame,(input_width,input_height)) + + counter += 1 + if counter % fps_avg_frame_count == 0: + end_time = time.time() + fps = fps_avg_frame_count / (end_time - start_time) + start_time = time.time() + + if len(results) != 0: + label_id, prob = results[0] + cv2.putText(frame, + f"{labels[label_id]} {prob:.3f}", # text + (CAMERA_WIDTH-120, 10), # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + 0.8, # font_scale + (0,255,255), # font_color + 1, # thickness + cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + cv2.putText(frame, '%.1fms' % (elapsed_ms), (CAMERA_WIDTH-120, 40),cv2.FONT_HERSHEY_PLAIN,1, (255, 255, 225), 1) + cv2.putText(frame, 'fps %s'%round(fps,1), (CAMERA_WIDTH-120, 20),cv2.FONT_HERSHEY_PLAIN,1,(255, 255, 225),1) + cv2.imshow('Detecting...', frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit + break + if cv2.getWindowProperty('Detecting...',1) < 0: + break + + run_flag = False + cap.release() + cv2.destroyAllWindows() + +def main(): + # setting parameters of model and corresponding label + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '--model', + help='File path of .tflite file.', + required=False, + default=default_model) + parser.add_argument( + '--labels', + help='File path of labels file.', + required=False, + default=default_labels) + args = parser.parse_args() + + # loading model and corresponding label + labels = load_labels(args.labels) + interpreter = Interpreter(args.model) + interpreter.allocate_tensors() + _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] + + imgshow_t = threading.Thread(target=imgshow_fuc,args=(input_height, input_width,labels)) + imgshow_t.start() + + global results + global elapsed_ms + global run_flag + + while True: + + if len(image) != 0: + start_time = 
time.monotonic() + results = __classify_image(interpreter, image,labels) + elapsed_ms = (time.monotonic() - start_time) * 1000 + label_id, prob = results[0] + print(labels[label_id], prob) + print(' ') + + if run_flag == False: + print('\nend...') + break + + time.sleep(0.01) + + +def classify_image(image, model=None, labels=None): + # loading model and corresponding label + if model is None: + model = default_model + if labels is None: + labels = default_labels + + if not os.path.exists(model): + print('incorrect model path ') + return image + if not os.path.exists(labels): + print('incorrect labels path ') + return image + labels = load_labels(labels) + interpreter = Interpreter(model) + interpreter.allocate_tensors() + _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] + + if len(image) != 0: + # resize + img = cv2.resize(image, (input_width, input_height)) + # classify + results = __classify_image(interpreter, img,labels) + label_id, prob = results[0] + # print(labels[label_id], prob) + + image_classification_obj_parameter['name'] = labels[label_id] + image_classification_obj_parameter['acc'] = prob + + # putText + cv2.putText(image, + f"{labels[label_id]} {prob:.3f}", # text + (10, 25), # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + 0.8, # font_scale + (0, 255, 255), # font_color + 1, # thickness + cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + return image + +if __name__ == '__main__': + main() diff --git a/build/lib/vilib/mediapipe_object_detection.py b/build/lib/vilib/mediapipe_object_detection.py new file mode 100644 index 0000000..abae0ce --- /dev/null +++ b/build/lib/vilib/mediapipe_object_detection.py @@ -0,0 +1,137 @@ +# https://ai.google.dev/edge/mediapipe/solutions/vision/object_detector/python + +import cv2 +import numpy as np +import mediapipe as mp +import time + + +class MediapipeObjectDetection: + + # wget -q -O efficientdet.tflite -q 
https://storage.googleapis.com/mediapipe-models/object_detector/efficientdet_lite0/int8/1/efficientdet_lite0.tflite + DEFAULT_MODEL = '/opt/vilib/efficientdet_lite0.tflite' + + CAMERA_WIDTH = 640 + CAMERA_HEIGHT = 480 + + colors = [(0,255,255),(255,0,0),(0,255,64),(255,255,0), + (255,128,64),(128,128,255),(255,128,255),(255,128,128)] + + def __init__(self, + model:str=DEFAULT_MODEL, + max_results:int=10, + score_threshold:float=0.3, + width:int=CAMERA_WIDTH, + height:int=CAMERA_HEIGHT, + ): + """ + Args: + img: The input image. + max_results: Max number of detection results. + score_threshold: The score threshold of detection results. + model: Name of the TFLite object detection model. + width: The width of the frame captured from the camera. + height: The height of the frame captured from the camera. + """ + + # Initialize the object detection model + + BaseOptions = mp.tasks.BaseOptions + ObjectDetector = mp.tasks.vision.ObjectDetector + ObjectDetectorOptions = mp.tasks.vision.ObjectDetectorOptions + VisionRunningMode = mp.tasks.vision.RunningMode + + options = ObjectDetectorOptions( + base_options=BaseOptions(model_asset_path=model), + max_results=max_results, + score_threshold=score_threshold, + running_mode=VisionRunningMode.IMAGE) + + self.detector = ObjectDetector.create_from_options(options) + + + def detect(self, image): + rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_image) + result = self.detector.detect(mp_image) + return result + + def draw(self, image, detection_result, x_boom:int=1, y_boom:int=1): + for i, detection in enumerate(detection_result.detections): + # Draw bounding_box + bbox = detection.bounding_box + start_point = int(bbox.origin_x*x_boom), int(bbox.origin_y*y_boom) + end_point = int(bbox.origin_x*x_boom) + int(bbox.width*x_boom), int(bbox.origin_y*y_boom) + int(bbox.height*y_boom) + # Use the orange color for high visibility. 
+ cv2.rectangle(image, + start_point, + end_point, + self.colors[i%7], + 2 + ) + + # Draw label and score + category = detection.categories[0] + category_name = category.category_name + probability = round(category.score, 2) + result_text = category_name + ' (' + str(probability) + ')' + text_location = (10 + int(bbox.origin_x*x_boom), + 18 + int(bbox.origin_y*y_boom)) + cv2.putText(image, + result_text, + text_location, + cv2.FONT_HERSHEY_DUPLEX, + 0.8, + self.colors[i%7], + 1, + cv2.LINE_AA) + + return image + + + +if __name__ == '__main__': + from picamera2 import MappedArray, Picamera2, Preview + import libcamera + import time + import cv2 + + picam2 = Picamera2() + config = picam2.create_preview_configuration( + main={"size": (640, 480), "format": "RGB888"}, + transform=libcamera.Transform(hflip=True, vflip=True) + ) + picam2.configure(config) + picam2.start() + + detector = MediapipeObjectDetection() + framecount = 0 + fps = 0.0 + start_time = time.time() + while True: + img = picam2.capture_array() + result = detector.detect(img) + img = detector.draw(img, result, x_boom=1, y_boom=1) + + # calculate fps + framecount += 1 + elapsed_time = float(time.time() - start_time) + if (elapsed_time > 1): + fps = round(framecount/elapsed_time, 1) + framecount = 0 + start_time = time.time() + + cv2.putText( + img, # image + f"FPS: {fps}", # text + (520, 20), # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + 0.6, # font_scale + (255, 255, 255), # font_color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + cv2.imshow('image', img) + cv2.waitKey(1) + diff --git a/build/lib/vilib/objects_detection.py b/build/lib/vilib/objects_detection.py new file mode 100644 index 0000000..56f8618 --- /dev/null +++ b/build/lib/vilib/objects_detection.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python3 +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import re +import time +import os 
+ +import numpy as np +import cv2 +from PIL import Image +from tflite_runtime.interpreter import Interpreter +import threading + +from .utils import load_labels + +CAMERA_WIDTH = 640 +CAMERA_HEIGHT = 480 + +default_model = '/opt/vilib/detect.tflite' +default_labels = '/opt/vilib/coco_labels.txt' + +####################################################### +object_detection_list_parameter = [] + +def add_class_names(objects): + labels = load_labels(default_labels) + for object in objects: + object["class_name"] = labels[object['class_id']] + +def copy_list_into_list(source,destination): + destination.clear() + for i in source: + destination.append(i) +####################################################### + +def set_input_tensor(interpreter, image): + """Sets the input tensor.""" + tensor_index = interpreter.get_input_details()[0]['index'] + input_tensor = interpreter.tensor(tensor_index)()[0] + input_tensor[:, :] = image + + +def get_output_tensor(interpreter, index): + """Returns the output tensor at the given index.""" + output_details = interpreter.get_output_details()[index] + tensor = np.squeeze(interpreter.get_tensor(output_details['index'])) + return tensor + + +def __detect_objects(interpreter, image, threshold): + """Returns a list of detection results, each a dictionary of object info.""" + set_input_tensor(interpreter, image) + interpreter.invoke() + + # Get all output details + boxes = get_output_tensor(interpreter, 0) + classes = get_output_tensor(interpreter, 1) + scores = get_output_tensor(interpreter, 2) + count = int(get_output_tensor(interpreter, 3)) + + results = [] + for i in range(count): + if scores[i] >= threshold: + result = { + 'bounding_box': boxes[i], + 'class_id': classes[i], + 'score': scores[i] + } + results.append(result) + #global object_detection_list_parameter + # Allow programmer to access the results + copy_list_into_list(results,object_detection_list_parameter) + add_class_names(object_detection_list_parameter) + return results + 
+ +colors = [(0,255,255),(255,0,0),(0,255,64),(255,255,0), + (255,128,64),(128,128,255),(255,128,255),(255,128,128)] + +def put_text(img,results,labels_map,width=CAMERA_WIDTH,height=CAMERA_HEIGHT): + for i,obj in enumerate(results): + # Convert the bounding box figures from relative coordinates + # to absolute coordinates based on the original resolution + ymin, xmin, ymax, xmax = obj['bounding_box'] + xmin = int(xmin * width) + xmax = int(xmax * width) + ymin = int(ymin * height) + ymax = int(ymax * height) + + cv2.rectangle(img,(xmin, ymin), (xmax, ymax),colors[i%7],2) + cv2.putText(img, + f"{labels_map[obj['class_id']]} {obj['score']:.2f}", + (xmin+6, ymin+18), + cv2.FONT_HERSHEY_PLAIN, #FONT_HERSHEY_DUPLEX + 1.2, + colors[i%7], + 1, + cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + # print('%s %.2f' % (labels_map[obj['class_id']], obj['score'])) + # print('\n') + + return img + +# For static images: +def detect_objects(image, model=None, labels=None, width=CAMERA_WIDTH, height=CAMERA_HEIGHT, threshold=0.4): + # loading model and corresponding label + if model is None: + model = default_model + if labels is None: + labels = default_labels + + if not os.path.exists(model): + print('incorrect model path ') + return image + if not os.path.exists(labels): + print('incorrect labels path ') + return image + labels = load_labels(labels) + interpreter = Interpreter(model) + interpreter.allocate_tensors() + _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] + + if len(image) != 0: + # resize + img = cv2.resize(image, (input_width, input_height)) + # classify + results = __detect_objects(interpreter, img, threshold) + # putText + image = put_text(image, results, labels, width, height) + + return image + + +# For webcam: +results = [] +image = [] +elapsed_ms = 0 +run_flag = False + +def imgshow_fuc(input_height, input_width,labels): + + global results + global elapsed_ms + global image + global run_flag + + run_flag = True + + 
counter, fps = 0, 0 + start_time = time.time() + fps_avg_frame_count = 10 + + # open camera + cap = cv2.VideoCapture(0) + cap.set(3,CAMERA_WIDTH) + cap.set(4,CAMERA_HEIGHT) + print('start...') + + while cap.isOpened(): + ret,frame = cap.read() + # frame = cv2.flip(frame, -1) # Flip camera vertically + image = cv2.resize(frame,(input_width,input_height)) + + counter += 1 + if counter % fps_avg_frame_count == 0: + end_time = time.time() + fps = fps_avg_frame_count / (end_time - start_time) + start_time = time.time() + + img = put_text(frame,results,labels) + cv2.putText(img, '%.1fms' % (elapsed_ms), (CAMERA_WIDTH-120, 40),cv2.FONT_HERSHEY_PLAIN,1, (255, 255, 225), 1) + cv2.putText(img, 'fps %s'%round(fps,1), (CAMERA_WIDTH-120, 20),cv2.FONT_HERSHEY_PLAIN,1,(255, 255, 225),1) + cv2.imshow('Detecting...', img) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit + break + if cv2.getWindowProperty('Detecting...',1) < 0: + break + + run_flag = False + cap.release() + cv2.destroyAllWindows() + + +def main(): + # setting parameters of model and corresponding label + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '--model', + help='File path of .tflite file.', + required=False, + default=default_model) + parser.add_argument( + '--labels', + help='File path of labels file.', + required=False, + default=default_labels) + parser.add_argument( + '--threshold', + help='Score threshold for detected objects.', + required=False, + type=float, + default=0.4) + args = parser.parse_args() + + # loading model and corresponding label + labels = load_labels(args.labels) + interpreter = Interpreter(args.model) + interpreter.allocate_tensors() + _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] + + imgshow_t = threading.Thread(target=imgshow_fuc,args=(input_height, input_width,labels)) + imgshow_t.start() + + global results + global 
elapsed_ms + global run_flag + + while True: + + if len(image) != 0: + start_time = time.monotonic() + results = __detect_objects(interpreter, image,args.threshold) + elapsed_ms = (time.monotonic() - start_time) * 1000 + # print(results) + + if run_flag == False: + print('\nend...') + break + + +if __name__ == '__main__': + main() + + diff --git a/build/lib/vilib/pose_detection.py b/build/lib/vilib/pose_detection.py new file mode 100644 index 0000000..6a7252a --- /dev/null +++ b/build/lib/vilib/pose_detection.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +import cv2 +import mediapipe as mp +from ast import literal_eval + +mp_drawing = mp.solutions.drawing_utils +# mp_drawing_styles = mp.solutions.drawing_styles +mp_pose = mp.solutions.pose + +class DetectPose(): + def __init__(self): + self.pose = mp_pose.Pose(min_detection_confidence=0.5, + min_tracking_confidence=0.5) + + def work(self,image): + joints = [] + if len(image) != 0: + # To improve performance, optionally mark the image as not writeable to + # pass by reference. + image.flags.writeable = False + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + results = self.pose.process(image) + + # Draw the pose annotation on the image. 
+ image.flags.writeable = True + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + mp_drawing.draw_landmarks( + image, + results.pose_landmarks, + mp_pose.POSE_CONNECTIONS,) + # landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()) + + joints = str(results.pose_landmarks).replace('\n','').replace(' ','').replace('landmark',',').replace(',','',1) + joints = '['+joints.replace('{x:','[').replace('y:',',').replace('z:',',').replace('visibilit','').replace('}',']')+']' + try: + joints = literal_eval(joints) + except Exception as e: + raise(e) + return image,joints diff --git a/build/lib/vilib/qrcode_recognition.py b/build/lib/vilib/qrcode_recognition.py new file mode 100644 index 0000000..e5a82dc --- /dev/null +++ b/build/lib/vilib/qrcode_recognition.py @@ -0,0 +1,90 @@ +import cv2 +from pyzbar import pyzbar +from PIL import Image, ImageDraw, ImageFont +import numpy as np + +'''Define parameters for qrcode recognition object''' +qrcode_obj_parameter = {} +qrcode_obj_parameter['x'] = 0 # the largest block center x-axis coordinate +qrcode_obj_parameter['y'] = 0 # the largest block center y-axis coordinate +qrcode_obj_parameter['w'] = 0 # the largest block pixel width +qrcode_obj_parameter['h'] = 0 # the largest block pixel height +qrcode_obj_parameter['data'] = "None" # recognition result +qrcode_obj_parameter['list'] = [] + +FONT_PATH = "/opt/vilib/Arial-Unicode-Regular.ttf" +FONT_SIZE = 16 +font = None + +def qrcode_recognize(img, border_rgb=(255, 0, 0), font_color=(0, 0, 255)): + global font + + # Detect and decode QR codes + barcodes = pyzbar.decode(img) + + qrcode_obj_parameter['list'].clear() + + if len(barcodes) > 0: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + + if font is None: + font = ImageFont.truetype(FONT_PATH, FONT_SIZE, encoding="utf-8") + + for barcode in barcodes: + # Return the coordinate(top left), width and height of contour + (x, y, w, h) = barcode.rect + + # 
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2) + draw.rectangle([x, y, x+w, y+h], outline=border_rgb, width=2) + + # the barcode data is a byte object, converted into a string + barcodeData = barcode.data.decode("utf-8") + # barcodeType = barcode.type + text = f"{barcodeData}" + + # add the barcode to the list of barcodes for output + qrcode_obj_parameter['list'].append({ + 'text': text, + 'x': x, + 'y': y, + 'w': w, + 'h': h, + }) + + if len(text) > 0: + qrcode_obj_parameter['data'] = text + qrcode_obj_parameter['h'] = h + qrcode_obj_parameter['w'] = w + qrcode_obj_parameter['x'] = x + qrcode_obj_parameter['y'] = y + # cv2.putText( + # img, # image + # text, # text + # (x, y - 10), # origin + # cv2.FONT_HERSHEY_SIMPLEX, # font + # 0.5, # font_scale + # border_rgb, # font_color + # 1, # thickness + # cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + # ) + draw.text((x, y-FONT_SIZE-2), text, font_color, font=font) + else: + qrcode_obj_parameter['data'] = "None" + qrcode_obj_parameter['x'] = 0 + qrcode_obj_parameter['y'] = 0 + qrcode_obj_parameter['w'] = 0 + qrcode_obj_parameter['h'] = 0 + + img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) + + return img + else: + qrcode_obj_parameter['data'] = "None" + qrcode_obj_parameter['x'] = 0 + qrcode_obj_parameter['y'] = 0 + qrcode_obj_parameter['w'] = 0 + qrcode_obj_parameter['h'] = 0 + qrcode_obj_parameter['list'] = [] + return img diff --git a/build/lib/vilib/traffic_sign_detection.py b/build/lib/vilib/traffic_sign_detection.py new file mode 100644 index 0000000..fdd3daf --- /dev/null +++ b/build/lib/vilib/traffic_sign_detection.py @@ -0,0 +1,341 @@ +import cv2 +import numpy as np +import os +from tflite_runtime.interpreter import Interpreter +from .utils import load_labels +# https://docs.sunfounder.com/projects/picar-x-v20/en/latest/ezblock/ezblock_traffic.html +# https://github.com/sunfounder/sf-pdf/raw/master/prop_card/object_detection/traffic-sign-cards.pdf + +'''Define parameters for 
traffic sign detection object''' +traffic_sign_obj_parameter = {} +traffic_sign_obj_parameter['x'] = 0 # Maximum traffic sign block center x-axis coordinate +traffic_sign_obj_parameter['y'] = 0 # Maximum traffic sign block center y-axis coordinate +traffic_sign_obj_parameter['w'] = 0 # Maximum face block pixel width +traffic_sign_obj_parameter['h'] = 0 # Maximum face block pixel height +traffic_sign_obj_parameter['t'] = 'none' # traffic sign text, could be: 'none', 'stop','right','left','forward' +traffic_sign_obj_parameter['acc'] = 0 # accuracy + +'''Default model and labels''' +traffic_sign_model_path = "/opt/vilib/traffic_sign_150_dr0.2.tflite" # default model path +traffic_sign_labels_path = '/opt/vilib/traffic_sign_150_dr0.2_labels.txt' # default model path + + +def traffic_sign_predict(interpreter, img): + ''' + Traffic sign predict type + + :param img: The detected image data + :type img: list + :param img: The detected image data + :type img: list + :returns: The confidence value and index of type + :rtype: tuple (confidence:float, type:str) + ''' + _, model_width, model_height, model_depth = interpreter.get_input_details()[0]['shape'] + if model_depth != 3 and model_depth != 1: + raise ValueError('Unsupported model depth') + + # resize the image according to the model size + resize_img = cv2.resize(img, (model_width, model_height), interpolation=cv2.INTER_LINEAR) + + + flatten_img = np.reshape(resize_img, (model_width, model_height, model_depth)) + im5 = np.expand_dims(flatten_img, axis = 0) + img_np_expanded = im5.astype('float32') + + # Perform the actual detection by running the model with the image as input + tensor_index = interpreter.get_input_details()[0]['index'] + interpreter.set_tensor(tensor_index, img_np_expanded) + interpreter.invoke() + + output_details = interpreter.get_output_details()[0] + output_data = interpreter.get_tensor(output_details['index']) + + result = np.squeeze(output_data) + accuracy = round(np.max(result), 2) + type_idnex = 
np.where(result==np.max(result))[0][0] + + return accuracy, type_idnex + +def cnt_area(cnt): + # Return the coordinate(top left), width and height of contour + x, y, w, h = cv2.boundingRect(cnt) + return w*h + + +def traffic_sign_detect(img, model=None, labels=None, border_rgb=(255, 0, 0)): + ''' + Traffic sign detection + + :param img: The detected image data + :type img: list + :param model: The tflite model file path, if 'None' use default path + :type model: str + :param labels: The labels file path, if 'None' use default path + :type labels: str + :param border_rgb: The color (RGB, tuple) of border. Eg: (255, 0, 0). + :type color_name: tuple + :returns: The image returned after detection + :rtype: Binary list + ''' + # border_rgb to border_bgr + r, g, b = border_rgb + border_bgr = (b, g, r) + + # loading model and corresponding label + if model == None: + model = traffic_sign_model_path + if labels == None: + labels = traffic_sign_labels_path + + if not os.path.exists(model): + raise('incorrect model path ') + return img + if not os.path.exists(labels): + raise('incorrect labels path ') + return img + + labels = load_labels(labels) + interpreter = Interpreter(model) + interpreter.allocate_tensors() + + # _, model_height, model_width, _ = interpreter.get_input_details()[0]['shape'] + # print('get_input_details', interpreter.get_input_details()[0]['shape'] ) + + # get img shape + width, height, depth = np.shape(img) + + # Convert the image in BGR to HSV + hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + + # Set range for red color and define mask + mask_red_1 = cv2.inRange(hsv, (157, 20, 20), (180, 255, 255)) + mask_red_2 = cv2.inRange(hsv, (0, 20, 20), (10, 255, 255)) + + # Set range for blue color and define mask + # mask_blue = cv2.inRange(hsv, (102, 50, 50), (125, 255, 255)) + mask_blue = cv2.inRange(hsv, (92, 10, 10), (125, 255, 255)) + + ### all + mask_all = cv2.bitwise_or(mask_red_1, mask_blue) + mask_all = cv2.bitwise_or(mask_red_2, mask_all) + + # define a 
5*5 kernel + kernel_5 = np.ones((5, 5), np.uint8) + + # opening the image (erosion followed by dilation), to remove the image noise + open_img = cv2.morphologyEx(mask_all, cv2.MORPH_OPEN, kernel_5, iterations=1) + # cv2.imshow('open_img', open_img) + + # Find contours in binary image + _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + + # Sort contours by area from smallest to largest + contours = sorted(contours, key=cnt_area, reverse=False) + + contours_num = len(contours) + traffic_sign_num = 0 + if contours_num > 0: + # Iterate over all contours + max_area = 0 + for i in contours: + # Return the coordinate(top left), width and height of contour + x, y, w, h = cv2.boundingRect(i) + + if w > 32 and h > 32: + + # Convert img to gray, if grayscale model + model_depth = interpreter.get_input_details()[0]['shape'][3] + if model_depth == 1: + img_possible_part = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + else: + img_possible_part = img + + # Cut out the contour image + x1 = int(x) + x2 = int(x + w) + y1 = int(y) + y2 = int(y + h) + x1 = x1-8 + x1 = -x1 if x1 < 0 else x1 + x2 = x2+8 + y1 = y1-8 + y1 = -y1 if y1 < 0 else y1 + y2 = y2+8 + img_possible_part = img_possible_part[y1:y2, x1:x2] + img_possible_part = (img_possible_part / 255.0) + img_possible_part = (img_possible_part - 0.5) * 2.0 + + # cv2.imshow('img_possible_part', img_possible_part) + + # predict traffic sign type + acc_val, traffic_type = traffic_sign_predict(interpreter, img_possible_part) + # Convert confidence to percentage + acc_val = round(acc_val*100) + traffic_type = labels[traffic_type] + + if acc_val >= 85: + # print(traffic_type, acc_val) + + # If it is a forward, turn left or right traffic sign, outline a circle + if traffic_type == 'left' or \ + traffic_type == 'right' or \ + traffic_type == 'forward': + + # Convert to grayscale 
image and detect circle + simple_gray = cv2.cvtColor(img[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY) + + circles = cv2.HoughCircles( + simple_gray, + cv2.HOUGH_GRADIENT, 1, 32, + param1=140, + param2=70, + minRadius=int(w/4.0), + maxRadius=max(w,h) + ) + # print(f'{circles}: circles') + + # Draw a circle outline, add text description + if circles is not None: + # Iterate over all circles and find the circle with the largest radius + max_radius = 0 + max_circle_index = 0 + max_circle = None + for circle in circles[0,:]: + # circle[center_xpos, center_ypos, radius] + if circle[2] > max_radius: + max_radius = circle[2] + max_circle = circle + traffic_sign_coor = (int(x+max_circle[0]),int(y+max_circle[1])) + cv2.circle(img, traffic_sign_coor, int(max_circle[2]), border_bgr, 2) + cv2.putText(img, + f"{traffic_type}:{acc_val:.1f}", + (int(x+max_circle[0]-max_circle[2]), int(y+max_circle[1]-max_circle[2]-5)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, # font size + border_bgr, # color + 1, # thickness + cv2.LINE_AA, + ) + if w * h > max_area: + max_area = w * h + max_obj_x = x + max_obj_y = y + max_obj_w = w + max_obj_h = h + max_obj_t = traffic_type + max_obj_acc = acc_val + traffic_sign_num += 1 + + # If it is a STOP traffic sign, outline a rectangle + elif traffic_type == 'stop': + red_mask_1 = cv2.inRange(hsv[y:y+h,x:x+w],(0,50,20), (4,255,255)) # 3.inRange():介于lower/upper之间的为白色,其余黑色 + red_mask_2 = cv2.inRange(hsv[y:y+h,x:x+w],(163,50,20), (180,255,255)) + red_mask_all = cv2.bitwise_or(red_mask_1,red_mask_2) + + open_img = cv2.morphologyEx(red_mask_all, cv2.MORPH_OPEN,kernel_5,iterations=1) #开运算 + open_img = cv2.dilate(open_img, kernel_5,iterations=5) + # Find contours in binary image + _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, blue_contours, hierarchy = _tuple + else: + blue_contours, hierarchy = _tuple + + contours_count = len(blue_contours) + if contours_count >=1: + 
blue_contours = sorted(blue_contours,key = cnt_area, reverse=True) + + epsilon = 0.025 * cv2.arcLength(blue_contours[0], True) + approx = cv2.approxPolyDP(blue_contours[0], epsilon, True) + corners = len(approx) + if corners >= 0: + traffic_sign_coor = (int(x+w/2),int(y+h/2)) + cv2.rectangle(img, (x,y), (x+w,y+h), border_bgr, 2) + cv2.putText(img, + f"{traffic_type}:{acc_val:.1f}", + (x, y-5), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, # font size + border_bgr, # color + 1, # thickness + cv2.LINE_AA, + ) + if w * h > max_area: + max_area = w * h + max_obj_x = x + max_obj_y = y + max_obj_w = w + max_obj_h = h + max_obj_t = traffic_type + max_obj_acc = acc_val + traffic_sign_num += 1 + + if traffic_sign_num > 0: + traffic_sign_obj_parameter['x'] = int(max_obj_x + max_obj_w/2) + traffic_sign_obj_parameter['y'] = int(max_obj_y + max_obj_h/2) + traffic_sign_obj_parameter['w'] = max_obj_w + traffic_sign_obj_parameter['h'] = max_obj_h + traffic_sign_obj_parameter['t'] = max_obj_t + traffic_sign_obj_parameter['acc'] = max_obj_acc + + if traffic_sign_num <= 0: + traffic_sign_obj_parameter['x'] = 0 + traffic_sign_obj_parameter['y'] = 0 + traffic_sign_obj_parameter['w'] = 0 + traffic_sign_obj_parameter['h'] = 0 + traffic_sign_obj_parameter['t'] = 'none' + traffic_sign_obj_parameter['acc'] = 0 + + # print(f'traffic_sign_num {traffic_sign_num}') + return img + + +# Test +def test(): + print("traffic sign detection ...") + + from picamera2 import Picamera2 + import libcamera + + picam2 = Picamera2() + preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = (640, 480) + preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 'YUV420' + preview_config.transform = libcamera.Transform(hflip=False, + vflip=False) + preview_config.colour_space = libcamera.ColorSpace.Sycc() + preview_config.buffer_count = 4 + preview_config.queue = True + + picam2.start() + + while True: + frame = picam2.capture_array() + + # frame = 
cv2.flip(frame, 0) # Flip camera horizontally + # frame = cv2.flip(frame, 1) # Flip camera vertically + # frame = cv2.flip(frame, -1) # Flip camera vertically & horizontally + + out_img = traffic_sign_detect(frame, border_rgb=(255, 255, 0)) + + cv2.imshow('Traffic sign detecting ...', out_img) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit + break + if cv2.getWindowProperty('Traffic sign detecting ...', 1) < 0: + break + + cv2.destroyAllWindows() + +if __name__ == "__main__": + test() diff --git a/build/lib/vilib/utils.py b/build/lib/vilib/utils.py new file mode 100644 index 0000000..560cc2f --- /dev/null +++ b/build/lib/vilib/utils.py @@ -0,0 +1,47 @@ +import os + +# utils +# ================================================================= +def run_command(cmd): + import subprocess + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + result = p.stdout.read().decode('utf-8') + status = p.poll() + return status, result + +def getIP(): + wlan0 = os.popen("ifconfig wlan0 |awk '/inet/'|awk 'NR==1 {print $2}'").readline().strip('\n') + eth0 = os.popen("ifconfig eth0 |awk '/inet/'|awk 'NR==1 {print $2}'").readline().strip('\n') + + if wlan0 == '': + wlan0 = None + if eth0 == '': + eth0 = None + + return wlan0,eth0 + +def check_machine_type(): + import platform + machine_type = platform.machine() + if machine_type == "armv7l": + return 32, machine_type + elif machine_type == "aarch64": + return 64, machine_type + else: + raise ValueError(f"[{machine_type}] not supported") + +def load_labels(path): + """Loads the labels file. 
Supports files with or without index numbers.""" + import re + + with open(path, 'r', encoding='utf-8') as f: + lines = f.readlines() + labels = {} + for row_number, content in enumerate(lines): + pair = re.split(r'[:\s]+', content.strip(), maxsplit=1) + if len(pair) == 2 and pair[0].strip().isdigit(): + labels[int(pair[0])] = pair[1].strip() + else: + labels[row_number] = pair[0].strip() + return labels \ No newline at end of file diff --git a/build/lib/vilib/version.py b/build/lib/vilib/version.py new file mode 100644 index 0000000..50d85c8 --- /dev/null +++ b/build/lib/vilib/version.py @@ -0,0 +1 @@ +__version__ = "0.3.18" diff --git a/build/lib/vilib/vilib.py b/build/lib/vilib/vilib.py new file mode 100644 index 0000000..6008f22 --- /dev/null +++ b/build/lib/vilib/vilib.py @@ -0,0 +1,856 @@ +#!/usr/bin/env python3 + +# whther print welcome message +import os +import logging + +from .version import __version__ +if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ + 'False', '0' +]: + from pkg_resources import require + picamera2_version = require('picamera2')[0].version + print(f'vilib {__version__} launching ...') + print(f'picamera2 {picamera2_version}') + +# set libcamera2 log level +os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' +from picamera2 import Picamera2 +import libcamera + +import cv2 +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +from flask import Flask, render_template, Response + +import time +import datetime +import threading +from multiprocessing import Process, Manager + +from .utils import * + +# user and user home directory +# ================================================================= +user = os.popen("echo ${SUDO_USER:-$(who -m | awk '{ print $1 }')}").readline().strip() +user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() +# print(f"user: {user}") +# print(f"user_home: {user_home}") + +# Default path for pictures and videos +DEFAULLT_PICTURES_PATH = 
'%s/Pictures/vilib/'%user_home +DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home + +# utils +# ================================================================= +def findContours(img): + _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + return contours, hierarchy + +# flask +# ================================================================= +os.environ['FLASK_DEBUG'] = 'development' +app = Flask(__name__) + +log = logging.getLogger('werkzeug') +log.setLevel(logging.ERROR) + +@app.route('/') +def index(): + """Video streaming home page.""" + return render_template('index.html') + +def get_frame(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_qrcode_pictrue(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_png_frame(): + return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() + +def get_qrcode(): + while Vilib.qrcode_img_encode is None: + time.sleep(0.2) + + return Vilib.qrcode_img_encode + +def gen(): + """Video streaming generator function.""" + while True: + # start_time = time.time() + frame = get_frame() + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + time.sleep(0.03) + # end_time = time.time() - start_time + # print('flask fps:%s'%int(1/end_time)) + +@app.route('/mjpg') ## video +def video_feed(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_display_flag: + response = Response(gen(), + mimetype='multipart/x-mixed-replace; boundary=frame') + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +@app.route('/mjpg.jpg') # jpg +def video_feed_jpg(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_frame(), mimetype="image/jpeg") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route('/mjpg.png') # png +def video_feed_png(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_png_frame(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route("/qrcode") +def qrcode_feed(): + qrcode_html = ''' + + + + QRcode + + + + QR Code + + +''' + return Response(qrcode_html, mimetype='text/html') + + +@app.route("/qrcode.png") +def qrcode_feed_png(): + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_qrcode_flag: + # response = Response(get_qrcode(), + # mimetype='multipart/x-mixed-replace; boundary=frame') + response = Response(get_qrcode(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display_qrcode(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +def web_camera_start(): + try: + Vilib.flask_start = True + app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) + except Exception as e: + print(e) + +# Vilib +# ================================================================= +class Vilib(object): + + picam2 = Picamera2() + + camera_size = (640, 480) + camera_width = 640 + camera_height = 480 + camera_vflip = False + camera_hflip = False + camera_run = False + + flask_thread = None + camera_thread = None + flask_start = False + + qrcode_display_thread = None + qrcode_making_completed = False + qrcode_img = Manager().list(range(1)) + qrcode_img_encode = None + qrcode_win_name = 'qrcode' + + img = Manager().list(range(1)) + flask_img = Manager().list(range(1)) + + Windows_Name = "picamera" + imshow_flag = False + web_display_flag = False + imshow_qrcode_flag = False + web_qrcode_flag = False + + draw_fps = False + fps_origin = (camera_width-105, 20) + fps_size = 0.6 + fps_color = (255, 255, 255) + + detect_obj_parameter = {} + color_detect_color = None + face_detect_sw = False + hands_detect_sw = False + pose_detect_sw = False + image_classify_sw = False + image_classification_model = None + image_classification_labels = None + objects_detect_sw = False + objects_detection_model = None + objects_detection_labels = None + qrcode_detect_sw = False + traffic_detect_sw = False + + @staticmethod + def get_instance(): + return Vilib.picam2 + + @staticmethod + def set_controls(controls): + Vilib.picam2.set_controls(controls) + + @staticmethod + def get_controls(): + return Vilib.picam2.capture_metadata() + + @staticmethod + def camera(): + Vilib.camera_width = Vilib.camera_size[0] + Vilib.camera_height = Vilib.camera_size[1] + + picam2 = Vilib.picam2 + + # Ensure we have a fresh configuration + try: + preview_config = picam2.preview_configuration + if preview_config is None: + # Create new configuration if needed + config = picam2.create_preview_configuration() 
+ picam2.configure(config) + preview_config = picam2.preview_configuration + except Exception as e: + print(f"Error getting preview configuration: {e}") + # Try to create a new configuration + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + + # preview_config.size = (800, 600) + if preview_config is not None: + preview_config.size = Vilib.camera_size + preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 'YUV420' + preview_config.transform = libcamera.Transform( + hflip=Vilib.camera_hflip, + vflip=Vilib.camera_vflip + ) + preview_config.colour_space = libcamera.ColorSpace.Sycc() + preview_config.buffer_count = 4 + preview_config.queue = True + # preview_config.raw = {'size': (2304, 1296)} + preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time + + try: + picam2.start() + except Exception as e: + print(f"\033[38;5;1mError:\033[0m\n{e}") + print("\nPlease check whether the camera is connected well" +\ + "You can use the \"libcamea-hello\" command to test the camera" + ) + exit(1) + Vilib.camera_run = True + Vilib.fps_origin = (Vilib.camera_width-105, 20) + fps = 0 + start_time = 0 + framecount = 0 + try: + start_time = time.time() + while True: + # ----------- extract image data ---------------- + # st = time.time() + Vilib.img = picam2.capture_array() + # print(f'picam2.capture_array(): {time.time() - st:.6f}') + # st = time.time() + + # ----------- image gains and effects ---------------- + + # ----------- image detection and recognition ---------------- + Vilib.img = Vilib.color_detect_func(Vilib.img) + Vilib.img = Vilib.face_detect_func(Vilib.img) + Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) + Vilib.img = Vilib.qrcode_detect_func(Vilib.img) + + Vilib.img = Vilib.image_classify_fuc(Vilib.img) + Vilib.img = Vilib.object_detect_fuc(Vilib.img) + Vilib.img = Vilib.hands_detect_fuc(Vilib.img) + Vilib.img = 
Vilib.pose_detect_fuc(Vilib.img) + + # ----------- calculate fps and draw fps ---------------- + # calculate fps + framecount += 1 + elapsed_time = float(time.time() - start_time) + if (elapsed_time > 1): + fps = round(framecount/elapsed_time, 1) + framecount = 0 + start_time = time.time() + + # print(f"elapsed_time: {elapsed_time}, fps: {fps}") + + # draw fps + if Vilib.draw_fps: + cv2.putText( + # img, # image + Vilib.img, + f"FPS: {fps}", # text + Vilib.fps_origin, # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + Vilib.fps_size, # font_scale + Vilib.fps_color, # font_color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + # ---- copy img for flask --- + # st = time.time() + Vilib.flask_img = Vilib.img + # print(f'vilib.flask_img: {time.time() - st:.6f}') + + # ----------- display on desktop ---------------- + if Vilib.imshow_flag == True: + try: + try: + prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) + qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) + if prop < 1 or qrcode_prop < 1: + break + except: + pass + + cv2.imshow(Vilib.Windows_Name, Vilib.img) + + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: + Vilib.qrcode_making_completed = False + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + + cv2.waitKey(1) + + except Exception as e: + Vilib.imshow_flag = False + print(f"imshow failed:\n {e}") + break + + # ----------- exit ---------------- + if Vilib.camera_run == False: + break + + # print(f'loop end: {time.time() - st:.6f}') + + except KeyboardInterrupt as e: + print(e) + finally: + picam2.close() + cv2.destroyAllWindows() + + @staticmethod + def camera_start(vflip=False, hflip=False, size=None): + if size is not None: + Vilib.camera_size = size + Vilib.camera_hflip = hflip + Vilib.camera_vflip = vflip + Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") + Vilib.camera_thread.daemon = False + Vilib.camera_thread.start() + 
while not Vilib.camera_run: + time.sleep(0.1) + + @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.2) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=3.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.2) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + try: + Vilib.picam2 = Picamera2() + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None + + @staticmethod + def display(local=True, web=True): + # cheack camera thread is_alive + if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_flag = True + print("Imgshow start ...") + else: + Vilib.imshow_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_display_flag = True + print("\nWeb display on:") + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/mjpg") + if eth0 != None: + print(f" http://{eth0}:9000/mjpg") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + else: + print('Error: Please execute < camera_start() > first.') + + @staticmethod + def show_fps(color=None, fps_size=None, fps_origin=None): + if color is not None: + Vilib.fps_color = color + if fps_size is not None: + Vilib.fps_size = fps_size + if fps_origin is not None: + 
Vilib.fps_origin = fps_origin + + Vilib.draw_fps = True + + @staticmethod + def hide_fps(): + Vilib.draw_fps = False + + # take photo + # ================================================================= + @staticmethod + def take_photo(photo_name, path=DEFAULLT_PICTURES_PATH): + # ----- check path ----- + if not os.path.exists(path): + # print('Path does not exist. Creating path now ... ') + os.makedirs(name=path, mode=0o751, exist_ok=True) + time.sleep(0.01) + # ----- save photo ----- + status = False + for _ in range(5): + if Vilib.img is not None: + status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) + break + else: + time.sleep(0.01) + else: + status = False + + # if status: + # print('The photo is saved as '+path+'/'+photo_name+'.jpg') + # else: + # print('Photo save failed .. ') + + return status + + + # record video + # ================================================================= + rec_video_set = {} + + rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') + #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") + + rec_video_set["fps"] = 30.0 + rec_video_set["framesize"] = (640, 480) + rec_video_set["isColor"] = True + + rec_video_set["name"] = "default" + rec_video_set["path"] = DEFAULLT_VIDEOS_PATH + + rec_video_set["start_flag"] = False + rec_video_set["stop_flag"] = False + + rec_thread = None + + @staticmethod + def rec_video_work(): + if not os.path.exists(Vilib.rec_video_set["path"]): + # print('Path does not exist. Creating path now ... 
') + os.makedirs(name=Vilib.rec_video_set["path"], + mode=0o751, + exist_ok=True + ) + time.sleep(0.01) + video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', + Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], + Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) + + while True: + if Vilib.rec_video_set["start_flag"] == True: + # video_out.write(Vilib.img_array[0]) + video_out.write(Vilib.img) + if Vilib.rec_video_set["stop_flag"] == True: + video_out.release() # note need to release the video writer + Vilib.rec_video_set["start_flag"] == False + break + + @staticmethod + def rec_video_run(): + if Vilib.rec_thread != None: + Vilib.rec_video_stop() + Vilib.rec_video_set["stop_flag"] = False + Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) + Vilib.rec_thread.daemon = True + Vilib.rec_thread.start() + + @staticmethod + def rec_video_start(): + Vilib.rec_video_set["start_flag"] = True + Vilib.rec_video_set["stop_flag"] = False + + @staticmethod + def rec_video_pause(): + Vilib.rec_video_set["start_flag"] = False + + @staticmethod + def rec_video_stop(): + Vilib.rec_video_set["start_flag"] == False + Vilib.rec_video_set["stop_flag"] = True + if Vilib.rec_thread != None: + Vilib.rec_thread.join(3) + Vilib.rec_thread = None + + # color detection + # ================================================================= + @staticmethod + def color_detect(color="red"): + ''' + :param color: could be red, green, blue, yellow , orange, purple + ''' + Vilib.color_detect_color = color + from .color_detection import color_detect_work, color_obj_parameter + Vilib.color_detect_work = color_detect_work + Vilib.color_obj_parameter = color_obj_parameter + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + 
Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + + @staticmethod + def color_detect_func(img): + if Vilib.color_detect_color is not None \ + and Vilib.color_detect_color != 'close' \ + and hasattr(Vilib, "color_detect_work"): + img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + return img + + @staticmethod + def close_color_detection(): + Vilib.color_detect_color = None + + # face detection + # ================================================================= + @staticmethod + def face_detect_switch(flag=False): + Vilib.face_detect_sw = flag + if Vilib.face_detect_sw: + from .face_detection import face_detect, set_face_detection_model, face_obj_parameter + Vilib.face_detect_work = face_detect + Vilib.set_face_detection_model = set_face_detection_model + Vilib.face_obj_parameter = face_obj_parameter + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + + @staticmethod + def face_detect_func(img): + if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): + img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = 
Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + return img + + # traffic sign detection + # ================================================================= + @staticmethod + def traffic_detect_switch(flag=False): + Vilib.traffic_detect_sw = flag + if Vilib.traffic_detect_sw: + from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter + Vilib.traffic_detect_work = traffic_sign_detect + Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + + @staticmethod + def traffic_detect_fuc(img): + if Vilib.traffic_detect_sw and hasattr(Vilib, "traffic_detect_work"): + img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + return img + + # qrcode recognition + # 
================================================================= + @staticmethod + def qrcode_detect_switch(flag=False): + Vilib.qrcode_detect_sw = flag + if Vilib.qrcode_detect_sw: + from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter + Vilib.qrcode_recognize = qrcode_recognize + Vilib.qrcode_obj_parameter = qrcode_obj_parameter + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] + + @staticmethod + def qrcode_detect_func(img): + if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): + img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + return img + + # qrcode making + # ================================================================= + @staticmethod + def make_qrcode(data, + path=None, + version=1, + box_size=10, + border=4, + fill_color=(132, 112, 255), + back_color=(255, 255, 255) + ): + import qrcode # https://github.com/lincolnloop/python-qrcode + + qr = qrcode.QRCode( + version=version, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=box_size, + border=border, + ) + qr.add_data(data) + qr.make(fit=True) + qr_pil = qr.make_image(fill_color=fill_color, + back_color=back_color) + if path != None: + qr_pil.save(path) + + Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), 
cv2.COLOR_RGB2BGR) + Vilib.qrcode_making_completed = True + + if Vilib.web_qrcode_flag: + Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() + + + + @staticmethod + def display_qrcode_work(): + while True: + if Vilib.imshow_flag: + time.sleep(0.1) + continue + + # ----------- display qrcode on desktop ---------------- + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : + Vilib.qrcode_making_completed = False + try: + if len(Vilib.qrcode_img) > 10: + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + cv2.waitKey(1) + if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: + cv2.destroyWindow(Vilib.qrcode_win_name) + except Exception as e: + Vilib.imshow_qrcode_flag = False + print(f"imshow qrcode failed:\n {e}") + break + time.sleep(0.1) + + @staticmethod + def display_qrcode(local=True, web=True): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_qrcode_flag = True + print("Imgshow qrcode start ...") + else: + Vilib.imshow_qrcode_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_qrcode_flag = True + print(f'QRcode display on:') + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/qrcode") + if eth0 != None: + print(f" http://{eth0}:9000/qrcode") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + + if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: + Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) + Vilib.qrcode_display_thread.daemon = True + Vilib.qrcode_display_thread.start() + + + # image classification + # 
================================================================= + @staticmethod + def image_classify_switch(flag=False): + from .image_classification import image_classification_obj_parameter + Vilib.image_classify_sw = flag + Vilib.image_classification_obj_parameter = image_classification_obj_parameter + + @staticmethod + def image_classify_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.image_classification_model = path + + @staticmethod + def image_classify_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.image_classification_labels = path + + @staticmethod + def image_classify_fuc(img): + if Vilib.image_classify_sw == True: + # print('classify_image starting') + from .image_classification import classify_image + img = classify_image(image=img, + model=Vilib.image_classification_model, + labels=Vilib.image_classification_labels) + return img + + # objects detection + # ================================================================= + @staticmethod + def object_detect_switch(flag=False): + Vilib.objects_detect_sw = flag + if Vilib.objects_detect_sw == True: + from .objects_detection import object_detection_list_parameter + Vilib.object_detection_list_parameter = object_detection_list_parameter + + @staticmethod + def object_detect_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.objects_detection_model = path + + @staticmethod + def object_detect_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.objects_detection_labels = path + + @staticmethod + def object_detect_fuc(img): + if Vilib.objects_detect_sw == True: + # print('detect_objects starting') + from .objects_detection import detect_objects + img = detect_objects(image=img, + model=Vilib.objects_detection_model, + labels=Vilib.objects_detection_labels) + return img + + # hands detection + # 
================================================================= + @staticmethod + def hands_detect_switch(flag=False): + from .hands_detection import DetectHands + Vilib.detect_hands = DetectHands() + Vilib.hands_detect_sw = flag + + @staticmethod + def hands_detect_fuc(img): + if Vilib.hands_detect_sw == True: + img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) + return img + + # pose detection + # ================================================================= + @staticmethod + def pose_detect_switch(flag=False): + from .pose_detection import DetectPose + Vilib.pose_detect = DetectPose() + Vilib.pose_detect_sw = flag + + @staticmethod + def pose_detect_fuc(img): + if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): + img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) + return img diff --git a/build/lib/vilib/vilib_debug.py b/build/lib/vilib/vilib_debug.py new file mode 100644 index 0000000..f209b82 --- /dev/null +++ b/build/lib/vilib/vilib_debug.py @@ -0,0 +1,846 @@ +#!/usr/bin/env python3 + +# whther print welcome message +import os +import logging + +from .version import __version__ +if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ + 'False', '0' +]: + from pkg_resources import require + picamera2_version = require('picamera2')[0].version + print(f'vilib {__version__} launching ...') + print(f'picamera2 {picamera2_version}') + +# set libcamera2 log level +os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' +from picamera2 import Picamera2 +import libcamera + +import cv2 +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +from flask import Flask, render_template, Response + +import time +import datetime +import threading +from multiprocessing import Process, Manager + +from .utils import * + +# user and user home directory +# ================================================================= +user = os.popen("echo ${SUDO_USER:-$(who -m | 
awk '{ print $1 }')}").readline().strip() +user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() +# print(f"user: {user}") +# print(f"user_home: {user_home}") + +# Default path for pictures and videos +DEFAULLT_PICTURES_PATH = '%s/Pictures/vilib/'%user_home +DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home + +# utils +# ================================================================= +def findContours(img): + _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + return contours, hierarchy + +# flask +# ================================================================= +os.environ['FLASK_DEBUG'] = 'development' +app = Flask(__name__) + +log = logging.getLogger('werkzeug') +log.setLevel(logging.ERROR) + +@app.route('/') +def index(): + """Video streaming home page.""" + return render_template('index.html') + +def get_frame(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_qrcode_pictrue(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_png_frame(): + return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() + +def get_qrcode(): + while Vilib.qrcode_img_encode is None: + time.sleep(0.2) + + return Vilib.qrcode_img_encode + +def gen(): + """Video streaming generator function.""" + while True: + # start_time = time.time() + frame = get_frame() + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + time.sleep(0.03) + # end_time = time.time() - start_time + # print('flask fps:%s'%int(1/end_time)) + +@app.route('/mjpg') ## video +def video_feed(): + # from camera import Camera + """Video streaming route. 
Put this in the src attribute of an img tag.""" + if Vilib.web_display_flag: + response = Response(gen(), + mimetype='multipart/x-mixed-replace; boundary=frame') + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +@app.route('/mjpg.jpg') # jpg +def video_feed_jpg(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_frame(), mimetype="image/jpeg") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route('/mjpg.png') # png +def video_feed_png(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_png_frame(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route("/qrcode") +def qrcode_feed(): + qrcode_html = ''' + + + + QRcode + + + + QR Code + + +''' + return Response(qrcode_html, mimetype='text/html') + + +@app.route("/qrcode.png") +def qrcode_feed_png(): + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_qrcode_flag: + # response = Response(get_qrcode(), + # mimetype='multipart/x-mixed-replace; boundary=frame') + response = Response(get_qrcode(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display_qrcode(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +def web_camera_start(): + try: + Vilib.flask_start = True + app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) + except Exception as e: + print(e) + +# Vilib +# ================================================================= +class Vilib(object): + + picam2 = Picamera2() + + camera_size = (640, 480) + camera_width = 640 + camera_height = 480 + camera_vflip = False + camera_hflip = False + camera_run = False + + flask_thread = None + camera_thread = None + flask_start = False + + qrcode_display_thread = None + qrcode_making_completed = False + qrcode_img = Manager().list(range(1)) + qrcode_img_encode = None + qrcode_win_name = 'qrcode' + + img = Manager().list(range(1)) + flask_img = Manager().list(range(1)) + + Windows_Name = "picamera" + imshow_flag = False + web_display_flag = False + imshow_qrcode_flag = False + web_qrcode_flag = False + + draw_fps = False + fps_origin = (camera_width-105, 20) + fps_size = 0.6 + fps_color = (255, 255, 255) + + detect_obj_parameter = {} + color_detect_color = None + face_detect_sw = False + hands_detect_sw = False + pose_detect_sw = False + image_classify_sw = False + image_classification_model = None + image_classification_labels = None + objects_detect_sw = False + objects_detection_model = None + objects_detection_labels = None + qrcode_detect_sw = False + traffic_detect_sw = False + + @staticmethod + def get_instance(): + return Vilib.picam2 + + @staticmethod + def set_controls(controls): + Vilib.picam2.set_controls(controls) + + @staticmethod + def get_controls(): + return Vilib.picam2.capture_metadata() + + @staticmethod + def camera(): + Vilib.camera_width = Vilib.camera_size[0] + Vilib.camera_height = Vilib.camera_size[1] + + picam2 = Vilib.picam2 + + preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = Vilib.camera_size + preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 
'YUV420' + preview_config.transform = libcamera.Transform( + hflip=Vilib.camera_hflip, + vflip=Vilib.camera_vflip + ) + preview_config.colour_space = libcamera.ColorSpace.Sycc() + preview_config.buffer_count = 4 + preview_config.queue = True + # preview_config.raw = {'size': (2304, 1296)} + preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time + + try: + picam2.start() + except Exception as e: + print(f"\033[38;5;1mError:\033[0m\n{e}") + print("\nPlease check whether the camera is connected well" +\ + "You can use the \"libcamea-hello\" command to test the camera" + ) + exit(1) + Vilib.camera_run = True + Vilib.fps_origin = (Vilib.camera_width-105, 20) + fps = 0 + start_time = 0 + framecount = 0 + try: + start_time = time.time() + while True: + # ----------- extract image data ---------------- + # st = time.time() + Vilib.img = picam2.capture_array() + # print(f'picam2.capture_array(): {time.time() - st:.6f}') + # st = time.time() + + # ----------- image gains and effects ---------------- + + # ----------- image detection and recognition ---------------- + Vilib.img = Vilib.color_detect_func(Vilib.img) + Vilib.img = Vilib.face_detect_func(Vilib.img) + Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) + Vilib.img = Vilib.qrcode_detect_func(Vilib.img) + + Vilib.img = Vilib.image_classify_fuc(Vilib.img) + Vilib.img = Vilib.object_detect_fuc(Vilib.img) + Vilib.img = Vilib.hands_detect_fuc(Vilib.img) + Vilib.img = Vilib.pose_detect_fuc(Vilib.img) + + # ----------- calculate fps and draw fps ---------------- + # calculate fps + framecount += 1 + elapsed_time = float(time.time() - start_time) + if (elapsed_time > 1): + fps = round(framecount/elapsed_time, 1) + framecount = 0 + start_time = time.time() + + # print(f"elapsed_time: {elapsed_time}, fps: {fps}") + + # draw fps + if Vilib.draw_fps: + cv2.putText( + # img, # image + Vilib.img, + f"FPS: {fps}", # text + Vilib.fps_origin, # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + Vilib.fps_size, 
# font_scale + Vilib.fps_color, # font_color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + # ---- copy img for flask --- + # st = time.time() + Vilib.flask_img = Vilib.img + # print(f'vilib.flask_img: {time.time() - st:.6f}') + + # ----------- display on desktop ---------------- + if Vilib.imshow_flag == True: + try: + try: + prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) + qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) + if prop < 1 or qrcode_prop < 1: + break + except: + pass + + cv2.imshow(Vilib.Windows_Name, Vilib.img) + + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: + Vilib.qrcode_making_completed = False + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + + cv2.waitKey(1) + + except Exception as e: + Vilib.imshow_flag = False + print(f"imshow failed:\n {e}") + break + + # ----------- exit ---------------- + if Vilib.camera_run == False: + break + + # print(f'loop end: {time.time() - st:.6f}') + + except KeyboardInterrupt as e: + print(e) + finally: + picam2.close() + cv2.destroyAllWindows() + + @staticmethod + def camera_start(vflip=False, hflip=False, size=None): + if size is not None: + Vilib.camera_size = size + Vilib.camera_hflip = hflip + Vilib.camera_vflip = vflip + Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") + Vilib.camera_thread.daemon = False + Vilib.camera_thread.start() + while not Vilib.camera_run: + time.sleep(0.1) + + @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + # Create and configure a default preview 
configuration + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object with basic setup + try: + Vilib.picam2 = Picamera2() + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None + @staticmethod + def display(local=True, web=True): + # cheack camera thread is_alive + if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_flag = True + print("Imgshow start ...") + else: + Vilib.imshow_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_display_flag = True + print("\nWeb display on:") + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/mjpg") + if eth0 != None: + print(f" http://{eth0}:9000/mjpg") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + else: + print('Error: Please execute < camera_start() > first.') + + @staticmethod + def show_fps(color=None, fps_size=None, fps_origin=None): + if color is not None: + Vilib.fps_color = color + if fps_size is not None: + Vilib.fps_size = fps_size + if fps_origin is not None: + Vilib.fps_origin = fps_origin + + Vilib.draw_fps = True + + @staticmethod + def hide_fps(): + Vilib.draw_fps = False + + # take photo + # ================================================================= + @staticmethod + def take_photo(photo_name, 
path=DEFAULLT_PICTURES_PATH): + # ----- check path ----- + if not os.path.exists(path): + # print('Path does not exist. Creating path now ... ') + os.makedirs(name=path, mode=0o751, exist_ok=True) + time.sleep(0.01) + # ----- save photo ----- + status = False + for _ in range(5): + if Vilib.img is not None: + status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) + break + else: + time.sleep(0.01) + else: + status = False + + # if status: + # print('The photo is saved as '+path+'/'+photo_name+'.jpg') + # else: + # print('Photo save failed .. ') + + return status + + + # record video + # ================================================================= + rec_video_set = {} + + rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') + #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") + + rec_video_set["fps"] = 30.0 + rec_video_set["framesize"] = (640, 480) + rec_video_set["isColor"] = True + + rec_video_set["name"] = "default" + rec_video_set["path"] = DEFAULLT_VIDEOS_PATH + + rec_video_set["start_flag"] = False + rec_video_set["stop_flag"] = False + + rec_thread = None + + @staticmethod + def rec_video_work(): + if not os.path.exists(Vilib.rec_video_set["path"]): + # print('Path does not exist. Creating path now ... 
') + os.makedirs(name=Vilib.rec_video_set["path"], + mode=0o751, + exist_ok=True + ) + time.sleep(0.01) + video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', + Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], + Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) + + while True: + if Vilib.rec_video_set["start_flag"] == True: + # video_out.write(Vilib.img_array[0]) + video_out.write(Vilib.img) + if Vilib.rec_video_set["stop_flag"] == True: + video_out.release() # note need to release the video writer + Vilib.rec_video_set["start_flag"] == False + break + + @staticmethod + def rec_video_run(): + if Vilib.rec_thread != None: + Vilib.rec_video_stop() + Vilib.rec_video_set["stop_flag"] = False + Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) + Vilib.rec_thread.daemon = True + Vilib.rec_thread.start() + + @staticmethod + def rec_video_start(): + Vilib.rec_video_set["start_flag"] = True + Vilib.rec_video_set["stop_flag"] = False + + @staticmethod + def rec_video_pause(): + Vilib.rec_video_set["start_flag"] = False + + @staticmethod + def rec_video_stop(): + Vilib.rec_video_set["start_flag"] == False + Vilib.rec_video_set["stop_flag"] = True + if Vilib.rec_thread != None: + Vilib.rec_thread.join(3) + Vilib.rec_thread = None + + # color detection + # ================================================================= + @staticmethod + def color_detect(color="red"): + ''' + :param color: could be red, green, blue, yellow , orange, purple + ''' + Vilib.color_detect_color = color + from .color_detection import color_detect_work, color_obj_parameter + Vilib.color_detect_work = color_detect_work + Vilib.color_obj_parameter = color_obj_parameter + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + 
Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + + @staticmethod + def color_detect_func(img): + if Vilib.color_detect_color is not None \ + and Vilib.color_detect_color != 'close' \ + and hasattr(Vilib, "color_detect_work"): + img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + return img + + @staticmethod + def close_color_detection(): + Vilib.color_detect_color = None + + # face detection + # ================================================================= + @staticmethod + def face_detect_switch(flag=False): + Vilib.face_detect_sw = flag + if Vilib.face_detect_sw: + from .face_detection import face_detect, set_face_detection_model, face_obj_parameter + Vilib.face_detect_work = face_detect + Vilib.set_face_detection_model = set_face_detection_model + Vilib.face_obj_parameter = face_obj_parameter + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + + @staticmethod + def face_detect_func(img): + if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): + img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = 
Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + return img + + # traffic sign detection + # ================================================================= + @staticmethod + def traffic_detect_switch(flag=False): + Vilib.traffic_detect_sw = flag + if Vilib.traffic_detect_sw: + from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter + Vilib.traffic_detect_work = traffic_sign_detect + Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + + @staticmethod + def traffic_detect_fuc(img): + if Vilib.traffic_detect_sw and hasattr(Vilib, "traffic_detect_work"): + img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + return img + + # qrcode recognition + # 
================================================================= + @staticmethod + def qrcode_detect_switch(flag=False): + Vilib.qrcode_detect_sw = flag + if Vilib.qrcode_detect_sw: + from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter + Vilib.qrcode_recognize = qrcode_recognize + Vilib.qrcode_obj_parameter = qrcode_obj_parameter + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] + + @staticmethod + def qrcode_detect_func(img): + if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): + img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + return img + + # qrcode making + # ================================================================= + @staticmethod + def make_qrcode(data, + path=None, + version=1, + box_size=10, + border=4, + fill_color=(132, 112, 255), + back_color=(255, 255, 255) + ): + import qrcode # https://github.com/lincolnloop/python-qrcode + + qr = qrcode.QRCode( + version=version, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=box_size, + border=border, + ) + qr.add_data(data) + qr.make(fit=True) + qr_pil = qr.make_image(fill_color=fill_color, + back_color=back_color) + if path != None: + qr_pil.save(path) + + Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), 
cv2.COLOR_RGB2BGR) + Vilib.qrcode_making_completed = True + + if Vilib.web_qrcode_flag: + Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() + + + + @staticmethod + def display_qrcode_work(): + while True: + if Vilib.imshow_flag: + time.sleep(0.1) + continue + + # ----------- display qrcode on desktop ---------------- + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : + Vilib.qrcode_making_completed = False + try: + if len(Vilib.qrcode_img) > 10: + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + cv2.waitKey(1) + if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: + cv2.destroyWindow(Vilib.qrcode_win_name) + except Exception as e: + Vilib.imshow_qrcode_flag = False + print(f"imshow qrcode failed:\n {e}") + break + time.sleep(0.1) + + @staticmethod + def display_qrcode(local=True, web=True): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_qrcode_flag = True + print("Imgshow qrcode start ...") + else: + Vilib.imshow_qrcode_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_qrcode_flag = True + print(f'QRcode display on:') + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/qrcode") + if eth0 != None: + print(f" http://{eth0}:9000/qrcode") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + + if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: + Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) + Vilib.qrcode_display_thread.daemon = True + Vilib.qrcode_display_thread.start() + + + # image classification + # 
================================================================= + @staticmethod + def image_classify_switch(flag=False): + from .image_classification import image_classification_obj_parameter + Vilib.image_classify_sw = flag + Vilib.image_classification_obj_parameter = image_classification_obj_parameter + + @staticmethod + def image_classify_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.image_classification_model = path + + @staticmethod + def image_classify_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.image_classification_labels = path + + @staticmethod + def image_classify_fuc(img): + if Vilib.image_classify_sw == True: + # print('classify_image starting') + from .image_classification import classify_image + img = classify_image(image=img, + model=Vilib.image_classification_model, + labels=Vilib.image_classification_labels) + return img + + # objects detection + # ================================================================= + @staticmethod + def object_detect_switch(flag=False): + Vilib.objects_detect_sw = flag + if Vilib.objects_detect_sw == True: + from .objects_detection import object_detection_list_parameter + Vilib.object_detection_list_parameter = object_detection_list_parameter + + @staticmethod + def object_detect_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.objects_detection_model = path + + @staticmethod + def object_detect_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.objects_detection_labels = path + + @staticmethod + def object_detect_fuc(img): + if Vilib.objects_detect_sw == True: + # print('detect_objects starting') + from .objects_detection import detect_objects + img = detect_objects(image=img, + model=Vilib.objects_detection_model, + labels=Vilib.objects_detection_labels) + return img + + # hands detection + # 
================================================================= + @staticmethod + def hands_detect_switch(flag=False): + from .hands_detection import DetectHands + Vilib.detect_hands = DetectHands() + Vilib.hands_detect_sw = flag + + @staticmethod + def hands_detect_fuc(img): + if Vilib.hands_detect_sw == True: + img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) + return img + + # pose detection + # ================================================================= + @staticmethod + def pose_detect_switch(flag=False): + from .pose_detection import DetectPose + Vilib.pose_detect = DetectPose() + Vilib.pose_detect_sw = flag + + @staticmethod + def pose_detect_fuc(img): + if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): + img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) + return img diff --git a/vilib.egg-info/PKG-INFO b/vilib.egg-info/PKG-INFO deleted file mode 100644 index 30e16ce..0000000 --- a/vilib.egg-info/PKG-INFO +++ /dev/null @@ -1,80 +0,0 @@ -Metadata-Version: 2.4 -Name: vilib -Version: 0.3.18 -Summary: Vision Library for Raspberry Pi -Author-email: SunFounder -Project-URL: Homepage, https://github.com/sunfounder/vilib -Project-URL: Bug Tracker, https://github.com/sunfounder/vilib/issues -Keywords: vilib,sunfounder,opencv,image process,visual process,sunfounder -Classifier: Programming Language :: Python :: 3 -Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) -Classifier: Operating System :: POSIX :: Linux -Requires-Python: >=3.7 -Description-Content-Type: text/markdown -License-File: LICENSE -Dynamic: license-file - - -# vilib -- Vision Library for Raspberry Pi - -Image visual processing library with multiple functions such as color recognition, face recognition, hands detection, image_classification, objects_detection, Wireless video transmission, etc... 
- -## Quick Links - -- [Links](#links) -- [Install](#install) -- [Usage](#usage) -- [Update](#update) -- [About SunFounder](#about-sunfounder) -- [Contact us](#contact-us) - -## Docs - - - -## Links - -- our official website: -- picamera2: -- picamera2-manual: -- geoogle mediapipe: - -## Install - -Download this repository to your Raspberry Pi: - -```bash -cd ~ -git clone -b picamera2 https://github.com/sunfounder/vilib.git --depth 1 -``` - -Install - -```bash -cd ~/vilib -sudo python3 install.py -``` - -## Usage - -```bash -cd ~/vilib/examples -sudo python3 xxx.py -``` - -Stop running the example by using Ctrl+C - -## Update - -- [CHANGELOG] - -## About SunFounder - -SunFounder is a technology company focused on Raspberry Pi and Arduino open source community development. Committed to the promotion of open source culture, we strives to bring the fun of electronics making to people all around the world and enable everyone to be a maker. Our products include learning kits, development boards, robots, sensor modules and development tools. In addition to high quality products, SunFounder also offers video tutorials to help you make your own project. If you have interest in open source or making something cool, welcome to join us! 
- -## Contact us - -E-mail: - - -[CHANGELOG]:https://github.com/sunfounder/vilib/blob/master/CHANGELOG.md diff --git a/vilib.egg-info/SOURCES.txt b/vilib.egg-info/SOURCES.txt deleted file mode 100644 index 54a67c8..0000000 --- a/vilib.egg-info/SOURCES.txt +++ /dev/null @@ -1,20 +0,0 @@ -LICENSE -README.md -pyproject.toml -vilib/__init__.py -vilib/color_detection.py -vilib/face_detection.py -vilib/hands_detection.py -vilib/image_classification.py -vilib/mediapipe_object_detection.py -vilib/objects_detection.py -vilib/pose_detection.py -vilib/qrcode_recognition.py -vilib/traffic_sign_detection.py -vilib/utils.py -vilib/version.py -vilib/vilib.py -vilib.egg-info/PKG-INFO -vilib.egg-info/SOURCES.txt -vilib.egg-info/dependency_links.txt -vilib.egg-info/top_level.txt \ No newline at end of file diff --git a/vilib.egg-info/dependency_links.txt b/vilib.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/vilib.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/vilib.egg-info/top_level.txt b/vilib.egg-info/top_level.txt deleted file mode 100644 index 9a425de..0000000 --- a/vilib.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -vilib diff --git a/vilib/camera_close_fix_final.txt b/vilib/camera_close_fix_final.txt new file mode 100644 index 0000000..4d30db2 --- /dev/null +++ b/vilib/camera_close_fix_final.txt @@ -0,0 +1,33 @@ + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + # Create and configure a default preview configuration + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + + except Exception as 
e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object with basic setup + try: + Vilib.picam2 = Picamera2() + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None diff --git a/vilib/camera_close_fix_v2.txt b/vilib/camera_close_fix_v2.txt new file mode 100644 index 0000000..8d5abfa --- /dev/null +++ b/vilib/camera_close_fix_v2.txt @@ -0,0 +1,30 @@ + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + # Recreate Picamera2 with fresh configuration + Vilib.picam2 = Picamera2() + # Important: Create the configuration objects immediately + _ = Vilib.picam2.preview_configuration + _ = Vilib.picam2.still_configuration + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + try: + Vilib.picam2 = Picamera2() + _ = Vilib.picam2.preview_configuration + _ = Vilib.picam2.still_configuration + except: + pass + + # Reset thread reference + Vilib.camera_thread = None diff --git a/vilib/comprehensive_fix.py b/vilib/comprehensive_fix.py new file mode 100644 index 0000000..d23f0b3 --- /dev/null +++ b/vilib/comprehensive_fix.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +""" +Comprehensive fix for vilib camera threading issues +This patches both camera_close and camera methods +""" + +def apply_comprehensive_fix(): + # Read the original file + with open('vilib.py', 'r') as f: + content = f.read() + + # 1. 
Fix camera_close method + old_camera_close = ''' @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1)''' + + new_camera_close = ''' @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.2) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=3.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.2) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object + try: + Vilib.picam2 = Picamera2() + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None''' + + # 2. Fix camera method to be more robust + old_camera_start = ''' preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = Vilib.camera_size''' + + new_camera_start = ''' # Ensure we have a fresh configuration + try: + preview_config = picam2.preview_configuration + if preview_config is None: + # Create new configuration if needed + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + except Exception as e: + print(f"Error getting preview configuration: {e}") + # Try to create a new configuration + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + + # preview_config.size = (800, 600) + if preview_config is not None: + preview_config.size = Vilib.camera_size''' + + # Apply the fixes + content = content.replace(old_camera_close, new_camera_close) + content = content.replace(old_camera_start, new_camera_start) + + # Write the fixed file + with open('vilib.py', 'w') as f: + 
f.write(content) + + print("Applied comprehensive vilib fix!") + return True + +if __name__ == "__main__": + apply_comprehensive_fix() diff --git a/vilib/vilib.py b/vilib/vilib.py index cc552f1..6008f22 100644 --- a/vilib/vilib.py +++ b/vilib/vilib.py @@ -247,9 +247,24 @@ def camera(): picam2 = Vilib.picam2 - preview_config = picam2.preview_configuration + # Ensure we have a fresh configuration + try: + preview_config = picam2.preview_configuration + if preview_config is None: + # Create new configuration if needed + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + except Exception as e: + print(f"Error getting preview configuration: {e}") + # Try to create a new configuration + config = picam2.create_preview_configuration() + picam2.configure(config) + preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) - preview_config.size = Vilib.camera_size + if preview_config is not None: + preview_config.size = Vilib.camera_size preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 'YUV420' preview_config.transform = libcamera.Transform( hflip=Vilib.camera_hflip, @@ -378,24 +393,31 @@ def camera_start(vflip=False, hflip=False, size=None): def camera_close(): if Vilib.camera_thread != None: Vilib.camera_run = False - time.sleep(0.1) + time.sleep(0.2) # Wait for camera thread to finish if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=2.0) + Vilib.camera_thread.join(timeout=3.0) # Properly close and reinitialize Picamera2 try: if Vilib.picam2 is not None: Vilib.picam2.close() - time.sleep(0.1) - Vilib.picam2 = Picamera2() + time.sleep(0.2) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + except Exception as e: print(f"Warning during camera cleanup: {e}") # Force recreation of Picamera2 object - Vilib.picam2 = Picamera2() + try: + Vilib.picam2 = Picamera2() + except Exception as e2: + print(f"Failed 
to reinitialize camera: {e2}") # Reset thread reference Vilib.camera_thread = None + @staticmethod def display(local=True, web=True): # cheack camera thread is_alive diff --git a/vilib/vilib_debug.py b/vilib/vilib_debug.py new file mode 100644 index 0000000..f209b82 --- /dev/null +++ b/vilib/vilib_debug.py @@ -0,0 +1,846 @@ +#!/usr/bin/env python3 + +# whther print welcome message +import os +import logging + +from .version import __version__ +if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ + 'False', '0' +]: + from pkg_resources import require + picamera2_version = require('picamera2')[0].version + print(f'vilib {__version__} launching ...') + print(f'picamera2 {picamera2_version}') + +# set libcamera2 log level +os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' +from picamera2 import Picamera2 +import libcamera + +import cv2 +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +from flask import Flask, render_template, Response + +import time +import datetime +import threading +from multiprocessing import Process, Manager + +from .utils import * + +# user and user home directory +# ================================================================= +user = os.popen("echo ${SUDO_USER:-$(who -m | awk '{ print $1 }')}").readline().strip() +user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() +# print(f"user: {user}") +# print(f"user_home: {user_home}") + +# Default path for pictures and videos +DEFAULLT_PICTURES_PATH = '%s/Pictures/vilib/'%user_home +DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home + +# utils +# ================================================================= +def findContours(img): + _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) + # compatible with opencv3.x and openc4.x + if len(_tuple) == 3: + _, contours, hierarchy = _tuple + else: + contours, hierarchy = _tuple + return contours, hierarchy + +# flask +# 
================================================================= +os.environ['FLASK_DEBUG'] = 'development' +app = Flask(__name__) + +log = logging.getLogger('werkzeug') +log.setLevel(logging.ERROR) + +@app.route('/') +def index(): + """Video streaming home page.""" + return render_template('index.html') + +def get_frame(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_qrcode_pictrue(): + return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() + +def get_png_frame(): + return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() + +def get_qrcode(): + while Vilib.qrcode_img_encode is None: + time.sleep(0.2) + + return Vilib.qrcode_img_encode + +def gen(): + """Video streaming generator function.""" + while True: + # start_time = time.time() + frame = get_frame() + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + time.sleep(0.03) + # end_time = time.time() - start_time + # print('flask fps:%s'%int(1/end_time)) + +@app.route('/mjpg') ## video +def video_feed(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_display_flag: + response = Response(gen(), + mimetype='multipart/x-mixed-replace; boundary=frame') + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +@app.route('/mjpg.jpg') # jpg +def video_feed_jpg(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_frame(), mimetype="image/jpeg") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route('/mjpg.png') # png +def video_feed_png(): + # from camera import Camera + """Video streaming route. Put this in the src attribute of an img tag.""" + response = Response(get_png_frame(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + +@app.route("/qrcode") +def qrcode_feed(): + qrcode_html = ''' + + + + QRcode + + + + QR Code + + +''' + return Response(qrcode_html, mimetype='text/html') + + +@app.route("/qrcode.png") +def qrcode_feed_png(): + """Video streaming route. Put this in the src attribute of an img tag.""" + if Vilib.web_qrcode_flag: + # response = Response(get_qrcode(), + # mimetype='multipart/x-mixed-replace; boundary=frame') + response = Response(get_qrcode(), mimetype="image/png") + response.headers.add("Access-Control-Allow-Origin", "*") + return response + else: + tip = ''' + Please enable web display first: + Vilib.display_qrcode(web=True) +''' + html = f"

{tip}

" + return Response(html, mimetype='text/html') + +def web_camera_start(): + try: + Vilib.flask_start = True + app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) + except Exception as e: + print(e) + +# Vilib +# ================================================================= +class Vilib(object): + + picam2 = Picamera2() + + camera_size = (640, 480) + camera_width = 640 + camera_height = 480 + camera_vflip = False + camera_hflip = False + camera_run = False + + flask_thread = None + camera_thread = None + flask_start = False + + qrcode_display_thread = None + qrcode_making_completed = False + qrcode_img = Manager().list(range(1)) + qrcode_img_encode = None + qrcode_win_name = 'qrcode' + + img = Manager().list(range(1)) + flask_img = Manager().list(range(1)) + + Windows_Name = "picamera" + imshow_flag = False + web_display_flag = False + imshow_qrcode_flag = False + web_qrcode_flag = False + + draw_fps = False + fps_origin = (camera_width-105, 20) + fps_size = 0.6 + fps_color = (255, 255, 255) + + detect_obj_parameter = {} + color_detect_color = None + face_detect_sw = False + hands_detect_sw = False + pose_detect_sw = False + image_classify_sw = False + image_classification_model = None + image_classification_labels = None + objects_detect_sw = False + objects_detection_model = None + objects_detection_labels = None + qrcode_detect_sw = False + traffic_detect_sw = False + + @staticmethod + def get_instance(): + return Vilib.picam2 + + @staticmethod + def set_controls(controls): + Vilib.picam2.set_controls(controls) + + @staticmethod + def get_controls(): + return Vilib.picam2.capture_metadata() + + @staticmethod + def camera(): + Vilib.camera_width = Vilib.camera_size[0] + Vilib.camera_height = Vilib.camera_size[1] + + picam2 = Vilib.picam2 + + preview_config = picam2.preview_configuration + # preview_config.size = (800, 600) + preview_config.size = Vilib.camera_size + preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 
'YUV420' + preview_config.transform = libcamera.Transform( + hflip=Vilib.camera_hflip, + vflip=Vilib.camera_vflip + ) + preview_config.colour_space = libcamera.ColorSpace.Sycc() + preview_config.buffer_count = 4 + preview_config.queue = True + # preview_config.raw = {'size': (2304, 1296)} + preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time + + try: + picam2.start() + except Exception as e: + print(f"\033[38;5;1mError:\033[0m\n{e}") + print("\nPlease check whether the camera is connected well" +\ + "You can use the \"libcamea-hello\" command to test the camera" + ) + exit(1) + Vilib.camera_run = True + Vilib.fps_origin = (Vilib.camera_width-105, 20) + fps = 0 + start_time = 0 + framecount = 0 + try: + start_time = time.time() + while True: + # ----------- extract image data ---------------- + # st = time.time() + Vilib.img = picam2.capture_array() + # print(f'picam2.capture_array(): {time.time() - st:.6f}') + # st = time.time() + + # ----------- image gains and effects ---------------- + + # ----------- image detection and recognition ---------------- + Vilib.img = Vilib.color_detect_func(Vilib.img) + Vilib.img = Vilib.face_detect_func(Vilib.img) + Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) + Vilib.img = Vilib.qrcode_detect_func(Vilib.img) + + Vilib.img = Vilib.image_classify_fuc(Vilib.img) + Vilib.img = Vilib.object_detect_fuc(Vilib.img) + Vilib.img = Vilib.hands_detect_fuc(Vilib.img) + Vilib.img = Vilib.pose_detect_fuc(Vilib.img) + + # ----------- calculate fps and draw fps ---------------- + # calculate fps + framecount += 1 + elapsed_time = float(time.time() - start_time) + if (elapsed_time > 1): + fps = round(framecount/elapsed_time, 1) + framecount = 0 + start_time = time.time() + + # print(f"elapsed_time: {elapsed_time}, fps: {fps}") + + # draw fps + if Vilib.draw_fps: + cv2.putText( + # img, # image + Vilib.img, + f"FPS: {fps}", # text + Vilib.fps_origin, # origin + cv2.FONT_HERSHEY_SIMPLEX, # font + Vilib.fps_size, 
# font_scale + Vilib.fps_color, # font_color + 1, # thickness + cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA + ) + + # ---- copy img for flask --- + # st = time.time() + Vilib.flask_img = Vilib.img + # print(f'vilib.flask_img: {time.time() - st:.6f}') + + # ----------- display on desktop ---------------- + if Vilib.imshow_flag == True: + try: + try: + prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) + qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) + if prop < 1 or qrcode_prop < 1: + break + except: + pass + + cv2.imshow(Vilib.Windows_Name, Vilib.img) + + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: + Vilib.qrcode_making_completed = False + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + + cv2.waitKey(1) + + except Exception as e: + Vilib.imshow_flag = False + print(f"imshow failed:\n {e}") + break + + # ----------- exit ---------------- + if Vilib.camera_run == False: + break + + # print(f'loop end: {time.time() - st:.6f}') + + except KeyboardInterrupt as e: + print(e) + finally: + picam2.close() + cv2.destroyAllWindows() + + @staticmethod + def camera_start(vflip=False, hflip=False, size=None): + if size is not None: + Vilib.camera_size = size + Vilib.camera_hflip = hflip + Vilib.camera_vflip = vflip + Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") + Vilib.camera_thread.daemon = False + Vilib.camera_thread.start() + while not Vilib.camera_run: + time.sleep(0.1) + + @staticmethod + def camera_close(): + if Vilib.camera_thread != None: + Vilib.camera_run = False + time.sleep(0.1) + # Wait for camera thread to finish + if Vilib.camera_thread.is_alive(): + Vilib.camera_thread.join(timeout=2.0) + + # Properly close and reinitialize Picamera2 + try: + if Vilib.picam2 is not None: + Vilib.picam2.close() + time.sleep(0.1) + + # Recreate Picamera2 object completely fresh + Vilib.picam2 = Picamera2() + + # Create and configure a default preview 
configuration + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + + except Exception as e: + print(f"Warning during camera cleanup: {e}") + # Force recreation of Picamera2 object with basic setup + try: + Vilib.picam2 = Picamera2() + preview_config = Vilib.picam2.create_preview_configuration() + Vilib.picam2.configure(preview_config) + except Exception as e2: + print(f"Failed to reinitialize camera: {e2}") + + # Reset thread reference + Vilib.camera_thread = None + @staticmethod + def display(local=True, web=True): + # cheack camera thread is_alive + if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_flag = True + print("Imgshow start ...") + else: + Vilib.imshow_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_display_flag = True + print("\nWeb display on:") + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/mjpg") + if eth0 != None: + print(f" http://{eth0}:9000/mjpg") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + else: + print('Error: Please execute < camera_start() > first.') + + @staticmethod + def show_fps(color=None, fps_size=None, fps_origin=None): + if color is not None: + Vilib.fps_color = color + if fps_size is not None: + Vilib.fps_size = fps_size + if fps_origin is not None: + Vilib.fps_origin = fps_origin + + Vilib.draw_fps = True + + @staticmethod + def hide_fps(): + Vilib.draw_fps = False + + # take photo + # ================================================================= + @staticmethod + def take_photo(photo_name, 
path=DEFAULLT_PICTURES_PATH): + # ----- check path ----- + if not os.path.exists(path): + # print('Path does not exist. Creating path now ... ') + os.makedirs(name=path, mode=0o751, exist_ok=True) + time.sleep(0.01) + # ----- save photo ----- + status = False + for _ in range(5): + if Vilib.img is not None: + status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) + break + else: + time.sleep(0.01) + else: + status = False + + # if status: + # print('The photo is saved as '+path+'/'+photo_name+'.jpg') + # else: + # print('Photo save failed .. ') + + return status + + + # record video + # ================================================================= + rec_video_set = {} + + rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') + #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") + + rec_video_set["fps"] = 30.0 + rec_video_set["framesize"] = (640, 480) + rec_video_set["isColor"] = True + + rec_video_set["name"] = "default" + rec_video_set["path"] = DEFAULLT_VIDEOS_PATH + + rec_video_set["start_flag"] = False + rec_video_set["stop_flag"] = False + + rec_thread = None + + @staticmethod + def rec_video_work(): + if not os.path.exists(Vilib.rec_video_set["path"]): + # print('Path does not exist. Creating path now ... 
') + os.makedirs(name=Vilib.rec_video_set["path"], + mode=0o751, + exist_ok=True + ) + time.sleep(0.01) + video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', + Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], + Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) + + while True: + if Vilib.rec_video_set["start_flag"] == True: + # video_out.write(Vilib.img_array[0]) + video_out.write(Vilib.img) + if Vilib.rec_video_set["stop_flag"] == True: + video_out.release() # note need to release the video writer + Vilib.rec_video_set["start_flag"] == False + break + + @staticmethod + def rec_video_run(): + if Vilib.rec_thread != None: + Vilib.rec_video_stop() + Vilib.rec_video_set["stop_flag"] = False + Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) + Vilib.rec_thread.daemon = True + Vilib.rec_thread.start() + + @staticmethod + def rec_video_start(): + Vilib.rec_video_set["start_flag"] = True + Vilib.rec_video_set["stop_flag"] = False + + @staticmethod + def rec_video_pause(): + Vilib.rec_video_set["start_flag"] = False + + @staticmethod + def rec_video_stop(): + Vilib.rec_video_set["start_flag"] == False + Vilib.rec_video_set["stop_flag"] = True + if Vilib.rec_thread != None: + Vilib.rec_thread.join(3) + Vilib.rec_thread = None + + # color detection + # ================================================================= + @staticmethod + def color_detect(color="red"): + ''' + :param color: could be red, green, blue, yellow , orange, purple + ''' + Vilib.color_detect_color = color + from .color_detection import color_detect_work, color_obj_parameter + Vilib.color_detect_work = color_detect_work + Vilib.color_obj_parameter = color_obj_parameter + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + 
Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + + @staticmethod + def color_detect_func(img): + if Vilib.color_detect_color is not None \ + and Vilib.color_detect_color != 'close' \ + and hasattr(Vilib, "color_detect_work"): + img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) + Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] + Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] + Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] + Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] + Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] + return img + + @staticmethod + def close_color_detection(): + Vilib.color_detect_color = None + + # face detection + # ================================================================= + @staticmethod + def face_detect_switch(flag=False): + Vilib.face_detect_sw = flag + if Vilib.face_detect_sw: + from .face_detection import face_detect, set_face_detection_model, face_obj_parameter + Vilib.face_detect_work = face_detect + Vilib.set_face_detection_model = set_face_detection_model + Vilib.face_obj_parameter = face_obj_parameter + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + + @staticmethod + def face_detect_func(img): + if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): + img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) + Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] + Vilib.detect_obj_parameter['human_y'] = 
Vilib.face_obj_parameter['y'] + Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] + Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] + Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] + return img + + # traffic sign detection + # ================================================================= + @staticmethod + def traffic_detect_switch(flag=False): + Vilib.traffic_detect_sw = flag + if Vilib.traffic_detect_sw: + from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter + Vilib.traffic_detect_work = traffic_sign_detect + Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + + @staticmethod + def traffic_detect_fuc(img): + if Vilib.traffic_detect_sw and hasattr(Vilib, "traffic_detect_work"): + img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] + Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] + Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] + Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] + Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] + Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] + return img + + # qrcode recognition + # 
================================================================= + @staticmethod + def qrcode_detect_switch(flag=False): + Vilib.qrcode_detect_sw = flag + if Vilib.qrcode_detect_sw: + from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter + Vilib.qrcode_recognize = qrcode_recognize + Vilib.qrcode_obj_parameter = qrcode_obj_parameter + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] + + @staticmethod + def qrcode_detect_func(img): + if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): + img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) + Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] + Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] + Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] + Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] + Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] + return img + + # qrcode making + # ================================================================= + @staticmethod + def make_qrcode(data, + path=None, + version=1, + box_size=10, + border=4, + fill_color=(132, 112, 255), + back_color=(255, 255, 255) + ): + import qrcode # https://github.com/lincolnloop/python-qrcode + + qr = qrcode.QRCode( + version=version, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=box_size, + border=border, + ) + qr.add_data(data) + qr.make(fit=True) + qr_pil = qr.make_image(fill_color=fill_color, + back_color=back_color) + if path != None: + qr_pil.save(path) + + Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), 
cv2.COLOR_RGB2BGR) + Vilib.qrcode_making_completed = True + + if Vilib.web_qrcode_flag: + Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() + + + + @staticmethod + def display_qrcode_work(): + while True: + if Vilib.imshow_flag: + time.sleep(0.1) + continue + + # ----------- display qrcode on desktop ---------------- + if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : + Vilib.qrcode_making_completed = False + try: + if len(Vilib.qrcode_img) > 10: + cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) + cv2.waitKey(1) + if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: + cv2.destroyWindow(Vilib.qrcode_win_name) + except Exception as e: + Vilib.imshow_qrcode_flag = False + print(f"imshow qrcode failed:\n {e}") + break + time.sleep(0.1) + + @staticmethod + def display_qrcode(local=True, web=True): + # check gui + if local == True: + if 'DISPLAY' in os.environ.keys(): + Vilib.imshow_qrcode_flag = True + print("Imgshow qrcode start ...") + else: + Vilib.imshow_qrcode_flag = False + print("Local display failed, because there is no gui.") + # web video + if web == True: + Vilib.web_qrcode_flag = True + print(f'QRcode display on:') + wlan0, eth0 = getIP() + if wlan0 != None: + print(f" http://{wlan0}:9000/qrcode") + if eth0 != None: + print(f" http://{eth0}:9000/qrcode") + print() # new line + + # ----------- flask_thread ---------------- + if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: + print('Starting web streaming ...') + Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) + Vilib.flask_thread.daemon = True + Vilib.flask_thread.start() + + if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: + Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) + Vilib.qrcode_display_thread.daemon = True + Vilib.qrcode_display_thread.start() + + + # image classification + # 
================================================================= + @staticmethod + def image_classify_switch(flag=False): + from .image_classification import image_classification_obj_parameter + Vilib.image_classify_sw = flag + Vilib.image_classification_obj_parameter = image_classification_obj_parameter + + @staticmethod + def image_classify_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.image_classification_model = path + + @staticmethod + def image_classify_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.image_classification_labels = path + + @staticmethod + def image_classify_fuc(img): + if Vilib.image_classify_sw == True: + # print('classify_image starting') + from .image_classification import classify_image + img = classify_image(image=img, + model=Vilib.image_classification_model, + labels=Vilib.image_classification_labels) + return img + + # objects detection + # ================================================================= + @staticmethod + def object_detect_switch(flag=False): + Vilib.objects_detect_sw = flag + if Vilib.objects_detect_sw == True: + from .objects_detection import object_detection_list_parameter + Vilib.object_detection_list_parameter = object_detection_list_parameter + + @staticmethod + def object_detect_set_model(path): + if not os.path.exists(path): + raise ValueError('incorrect model path ') + Vilib.objects_detection_model = path + + @staticmethod + def object_detect_set_labels(path): + if not os.path.exists(path): + raise ValueError('incorrect labels path ') + Vilib.objects_detection_labels = path + + @staticmethod + def object_detect_fuc(img): + if Vilib.objects_detect_sw == True: + # print('detect_objects starting') + from .objects_detection import detect_objects + img = detect_objects(image=img, + model=Vilib.objects_detection_model, + labels=Vilib.objects_detection_labels) + return img + + # hands detection + # 
================================================================= + @staticmethod + def hands_detect_switch(flag=False): + from .hands_detection import DetectHands + Vilib.detect_hands = DetectHands() + Vilib.hands_detect_sw = flag + + @staticmethod + def hands_detect_fuc(img): + if Vilib.hands_detect_sw == True: + img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) + return img + + # pose detection + # ================================================================= + @staticmethod + def pose_detect_switch(flag=False): + from .pose_detection import DetectPose + Vilib.pose_detect = DetectPose() + Vilib.pose_detect_sw = flag + + @staticmethod + def pose_detect_fuc(img): + if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): + img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) + return img From dea8bfe97c913e858d2e28c3897b93128bd4873a Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 7 Nov 2025 18:17:29 +0100 Subject: [PATCH 3/5] Remove ignored files --- .gitignore | 1 + build/lib/vilib/__init__.py | 5 - build/lib/vilib/color_detection.py | 165 ---- build/lib/vilib/comprehensive_fix.py | 84 -- build/lib/vilib/face_detection.py | 131 --- build/lib/vilib/hands_detection.py | 43 - build/lib/vilib/image_classification.py | 213 ----- build/lib/vilib/mediapipe_object_detection.py | 137 --- build/lib/vilib/objects_detection.py | 242 ----- build/lib/vilib/pose_detection.py | 39 - build/lib/vilib/qrcode_recognition.py | 90 -- build/lib/vilib/traffic_sign_detection.py | 341 ------- build/lib/vilib/utils.py | 47 - build/lib/vilib/version.py | 1 - build/lib/vilib/vilib.py | 856 ------------------ build/lib/vilib/vilib_debug.py | 846 ----------------- 16 files changed, 1 insertion(+), 3240 deletions(-) delete mode 100644 build/lib/vilib/__init__.py delete mode 100644 build/lib/vilib/color_detection.py delete mode 100644 build/lib/vilib/comprehensive_fix.py delete mode 100644 
build/lib/vilib/face_detection.py delete mode 100644 build/lib/vilib/hands_detection.py delete mode 100644 build/lib/vilib/image_classification.py delete mode 100644 build/lib/vilib/mediapipe_object_detection.py delete mode 100644 build/lib/vilib/objects_detection.py delete mode 100644 build/lib/vilib/pose_detection.py delete mode 100644 build/lib/vilib/qrcode_recognition.py delete mode 100644 build/lib/vilib/traffic_sign_detection.py delete mode 100644 build/lib/vilib/utils.py delete mode 100644 build/lib/vilib/version.py delete mode 100644 build/lib/vilib/vilib.py delete mode 100644 build/lib/vilib/vilib_debug.py diff --git a/.gitignore b/.gitignore index 0fd5f5c..52ef876 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ __pycache__ tests-dev/ bk/ +build \ No newline at end of file diff --git a/build/lib/vilib/__init__.py b/build/lib/vilib/__init__.py deleted file mode 100644 index 2a06086..0000000 --- a/build/lib/vilib/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python3 -from .vilib import Vilib -from .version import __version__ - - diff --git a/build/lib/vilib/color_detection.py b/build/lib/vilib/color_detection.py deleted file mode 100644 index 533d718..0000000 --- a/build/lib/vilib/color_detection.py +++ /dev/null @@ -1,165 +0,0 @@ -import cv2 -import numpy as np - - -'''The range of H, S, V in HSV space for colors''' -# You can run ../examples/hsv_threshold_analyzer.py to analyze and adjust these values -color_dict = { - 'red':[[0, 8], [80, 255], [0, 255]], - 'orange':[[12, 18], [80, 255], [80, 255]], - 'yellow':[[20, 60], [60, 255], [120, 255]], - 'green':[[45, 85], [120, 255], [80, 255]], - 'blue':[[92,120], [120, 255], [80, 255]], - 'purple':[[115,155], [30, 255], [60, 255]], - 'magenta':[[160,180], [30, 255], [60, 255]], - } - -'''Define parameters for color detection object''' -color_obj_parameter = {} - -color_obj_parameter['color'] = 'red' # color to be detected - -color_obj_parameter['x'] = 320 # the largest color block center 
x-axis coordinate -color_obj_parameter['y'] = 240 # the largest color block center y-axis coordinate -color_obj_parameter['w'] = 0 # the largest color block pixel width -color_obj_parameter['h'] = 0 # the largest color block pixel height -color_obj_parameter['n'] = 0 # Number of color blocks detected - - -def color_detect_work(img, width, height, color_name, rectangle_color=(0, 0, 255)): - ''' - Color detection with opencv - - :param img: The detected image data - :type img: list - :param width: The width of the image data - :type width: int - :param height: The height of the image data - :type height: int - :param color_name: The name of the color to be detected. Eg: "red". For supported colors, please see [color_dict]. - :type color_name: str - :param rectangle_color: The color (BGR, tuple) of rectangle. Eg: (0, 0, 255). - :type color_name: tuple - :returns: The image returned after detection. - :rtype: Binary list - ''' - color_obj_parameter['color'] = color_name - - # Reduce image for faster recognition - zoom = 4 # reduction ratio - width_zoom = int(width / zoom) - height_zoom = int(height / zoom) - resize_img = cv2.resize(img, (width_zoom, height_zoom), interpolation=cv2.INTER_LINEAR) - - # Convert the image in BGR to HSV - hsv = cv2.cvtColor(resize_img, cv2.COLOR_BGR2HSV) - - # Set range for red color and define mask - # color_lower = np.array([min(color_dict[color_name]), 60, 60]) - # color_upper = np.array([max(color_dict[color_name]), 255, 255]) - color_lower = np.array([min(color_dict[color_name][0]), min(color_dict[color_name][1]), min(color_dict[color_name][2])]) - color_upper = np.array([max(color_dict[color_name][0]), max(color_dict[color_name][1]), max(color_dict[color_name][2])]) - - mask = cv2.inRange(hsv, color_lower, color_upper) - - if color_name == 'red': - mask_2 = cv2.inRange(hsv, (167, 0, 0), (180, 255, 255)) - mask = cv2.bitwise_or(mask, mask_2) - - # define a 5*5 kernel - kernel_5 = np.ones((5,5), np.uint8) - - # opening the image 
(erosion followed by dilation), to remove the image noise - open_img = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_5, iterations=1) - - # Find contours in binary image - _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, contours, hierarchy = _tuple - else: - contours, hierarchy = _tuple - - color_obj_parameter['n'] = len(contours) - - if color_obj_parameter['n'] < 1: - color_obj_parameter['x'] = width/2 - color_obj_parameter['y'] = height/2 - color_obj_parameter['w'] = 0 - color_obj_parameter['h'] = 0 - color_obj_parameter['n'] = 0 - else: - # Iterate over all contours - max_area = 0 - for contour in contours: - # Return the coordinate(top left), width and height of contour - x, y, w, h = cv2.boundingRect(contour) - if w >= 8 and h >= 8: - x = x * zoom - y = y * zoom - w = w * zoom - h = h * zoom - # Draw rectangle around the color block - cv2.rectangle(img, # image - (x, y), # start position - (x+w, y+h), # end position - rectangle_color, # color - 2, # thickness - ) - # Draw color name - cv2.putText(img, # image - color_name, # text - (x, y-5), # start position - cv2.FONT_HERSHEY_SIMPLEX, # font - 0.72, # font size - rectangle_color, # color - 1, # thickness - cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - else: - continue - - # Save the attribute of the largest color block - object_area = w*h - if object_area > max_area: - max_area = object_area - color_obj_parameter['x'] = int(x + w/2) - color_obj_parameter['y'] = int(y + h/2) - color_obj_parameter['w'] = w - color_obj_parameter['h'] = h - - return img - -# Test -def test(color): - print("color detection: %s"%color) - - cap = cv2.VideoCapture(0) - cap.set(3, 640) - cap.set(4, 480) - - while cap.isOpened(): - success,frame = cap.read() - if not success: - print("Ignoring empty camera frame.") - # If loading a video, use 'break' instead of 'continue'. 
- continue - - # frame = cv2.flip(frame, -1) # Flip camera vertically - - out_img = color_detect_work(frame, 640, 480, color) - - cv2.imshow('Color detecting ...', out_img) - - if cv2.waitKey(1) & 0xFF == ord('q'): - break - if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit - break - if cv2.getWindowProperty('Color detecting ...', 1) < 0: - break - - cap.release() - cv2.destroyAllWindows() - -if __name__ == "__main__": - test('red') diff --git a/build/lib/vilib/comprehensive_fix.py b/build/lib/vilib/comprehensive_fix.py deleted file mode 100644 index d23f0b3..0000000 --- a/build/lib/vilib/comprehensive_fix.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -""" -Comprehensive fix for vilib camera threading issues -This patches both camera_close and camera methods -""" - -def apply_comprehensive_fix(): - # Read the original file - with open('vilib.py', 'r') as f: - content = f.read() - - # 1. Fix camera_close method - old_camera_close = ''' @staticmethod - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1)''' - - new_camera_close = ''' @staticmethod - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.2) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=3.0) - - # Properly close and reinitialize Picamera2 - try: - if Vilib.picam2 is not None: - Vilib.picam2.close() - time.sleep(0.2) - - # Recreate Picamera2 object completely fresh - Vilib.picam2 = Picamera2() - - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object - try: - Vilib.picam2 = Picamera2() - except Exception as e2: - print(f"Failed to reinitialize camera: {e2}") - - # Reset thread reference - Vilib.camera_thread = None''' - - # 2. 
Fix camera method to be more robust - old_camera_start = ''' preview_config = picam2.preview_configuration - # preview_config.size = (800, 600) - preview_config.size = Vilib.camera_size''' - - new_camera_start = ''' # Ensure we have a fresh configuration - try: - preview_config = picam2.preview_configuration - if preview_config is None: - # Create new configuration if needed - config = picam2.create_preview_configuration() - picam2.configure(config) - preview_config = picam2.preview_configuration - except Exception as e: - print(f"Error getting preview configuration: {e}") - # Try to create a new configuration - config = picam2.create_preview_configuration() - picam2.configure(config) - preview_config = picam2.preview_configuration - - # preview_config.size = (800, 600) - if preview_config is not None: - preview_config.size = Vilib.camera_size''' - - # Apply the fixes - content = content.replace(old_camera_close, new_camera_close) - content = content.replace(old_camera_start, new_camera_start) - - # Write the fixed file - with open('vilib.py', 'w') as f: - f.write(content) - - print("Applied comprehensive vilib fix!") - return True - -if __name__ == "__main__": - apply_comprehensive_fix() diff --git a/build/lib/vilib/face_detection.py b/build/lib/vilib/face_detection.py deleted file mode 100644 index ccf7565..0000000 --- a/build/lib/vilib/face_detection.py +++ /dev/null @@ -1,131 +0,0 @@ -import cv2 -# https://github.com/opencv/opencv-python - -'''Define parameters for face detection object''' -# Default model path -face_model_path = '/opt/vilib/haarcascade_frontalface_default.xml' -# face_model_path = '/opt/vilib/haarcascade_profileface.xml' - -face_obj_parameter = {} -face_obj_parameter['x'] = 320 # the largest face block center x-axis coordinate -face_obj_parameter['y'] = 240 # the largest face block center y-axis coordinate -face_obj_parameter['w'] = 0 # the largest face block pixel width -face_obj_parameter['h'] = 0 # the largest face pixel height 
-face_obj_parameter['n'] = 0 # Number of faces detected - -face_cascade = None - -def set_face_detection_model(model_path): - ''' - Set face detection model path - - :param model_path: The path of face haar-cascade XML classifier file - :type model_path: str - ''' - global face_cascade, face_model_path - - face_model_path = model_path - face_cascade = cv2.CascadeClassifier(face_model_path) - - -def face_detect(img, width, height, rectangle_color=(255, 0, 0)): - ''' - Face detection with opencv - - :param img: The detected image data - :type img: list - :param width: The width of the image data - :type width: int - :param height: The height of the image data - :type height: int - :param rectangle_color: The color (BGR, tuple) of rectangle. Eg: (255, 0, 0). - :type color_name: tuple - :returns: The image returned after detection. - :rtype: Binary list - ''' - global face_cascade - # Reduce image for faster recognition - zoom = 2 - width_zoom = int(width / zoom) - height_zoom = int(height / zoom) - resize_img = cv2.resize(img, (width_zoom, height_zoom), interpolation=cv2.INTER_LINEAR) - - # Converting the image to grayscale - gray_img = cv2.cvtColor(resize_img, cv2.COLOR_BGR2GRAY) - - # Loading the haar-cascade XML classifier file - if face_cascade is None: - face_cascade = cv2.CascadeClassifier(face_model_path) - - # Applying the face detection method on the grayscale image - faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=3) - - face_obj_parameter['n'] = len(faces) - - # Iterating over all detected faces - if face_obj_parameter['n'] > 0: - max_area = 0 - for (x,y,w,h) in faces: - x = x * zoom - y = y * zoom - w = w * zoom - h = h * zoom - # Draw rectangle around the face - cv2.rectangle(img, (x, y), (x+w, y+h), rectangle_color, 2) - - # Save the attribute of the largest color block - object_area = w * h - if object_area > max_area: - max_area = object_area - face_obj_parameter['x'] = int(x + w/2) - face_obj_parameter['y'] = int(y + h/2) 
- face_obj_parameter['w'] = w - face_obj_parameter['h'] = h - else: - face_obj_parameter['x'] = width/2 - face_obj_parameter['y'] = height/2 - face_obj_parameter['w'] = 0 - face_obj_parameter['h'] = 0 - face_obj_parameter['n'] = 0 - - return img - -# Test -def test(): - print("face detection ...") - - cap = cv2.VideoCapture(0) - cap.set(3, 640) - cap.set(4, 480) - - while cap.isOpened(): - success,frame = cap.read() - if not success: - print("Ignoring empty camera frame.") - # If loading a video, use 'break' instead of 'continue'. - continue - - # frame = cv2.flip(frame, -1) # Flip camera vertically - - out_img = face_detect(frame, 640, 480) - - cv2.imshow('Face detecting ...', out_img) - - # if cv2.waitKey(1) & 0xFF == ord('q'): - # break - # if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit - # break - # if cv2.getWindowProperty('Face detecting ...', 1) < 0: - # break - - key = cv2.waitKey(10) & 0xff - print(key) - - - cap.release() - cv2.destroyAllWindows() - -if __name__ == "__main__": - test() - - diff --git a/build/lib/vilib/hands_detection.py b/build/lib/vilib/hands_detection.py deleted file mode 100644 index 91e16ca..0000000 --- a/build/lib/vilib/hands_detection.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python3 -import cv2 -import mediapipe as mp -from ast import literal_eval - -mp_drawing = mp.solutions.drawing_utils -# mp_drawing_styles = mp.solutions.drawing_styles -mp_hands = mp.solutions.hands - -class DetectHands(): - def __init__(self): - self.hands = mp_hands.Hands(max_num_hands = 1, - min_detection_confidence=0.5, - min_tracking_confidence=0.5) - - def work(self,image): - joints = [] - if len(image) != 0: - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - results = self.hands.process(image) - - # Draw the hand annotations on the image. 
- image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - if results.multi_hand_landmarks: - for hand_landmarks in results.multi_hand_landmarks: - mp_drawing.draw_landmarks( - image, - hand_landmarks, - mp_hands.HAND_CONNECTIONS,) - # mp_drawing_styles.get_default_hand_landmarks_style(), - # mp_drawing_styles.get_default_hand_connections_style()) - joints = str(results.multi_hand_landmarks).replace('\n','').replace(' ','').replace('landmark',',').replace(',','',1) - joints = joints.replace('{x:','[').replace('y:',',').replace('z:',',').replace('}',']') - try: - joints = literal_eval(joints) - except Exception as e: - raise(e) - return image,joints - diff --git a/build/lib/vilib/image_classification.py b/build/lib/vilib/image_classification.py deleted file mode 100644 index fc07147..0000000 --- a/build/lib/vilib/image_classification.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import time -import os -import numpy as np - -import cv2 - -from tflite_runtime.interpreter import Interpreter -import threading - -from .utils import load_labels - -CAMERA_WIDTH = 640 -CAMERA_HEIGHT = 480 - -default_model = '/opt/vilib/mobilenet_v1_0.25_224_quant.tflite' -default_labels = '/opt/vilib/labels_mobilenet_quant_v1_224.txt' - -image_classification_obj_parameter = {} -image_classification_obj_parameter['name'] = "" # result -image_classification_obj_parameter['acc'] = 0 # accuracy - -def set_input_tensor(interpreter, image): - tensor_index = interpreter.get_input_details()[0]['index'] - input_tensor = interpreter.tensor(tensor_index)()[0] - input_tensor[:, :] = image - - -def __classify_image(interpreter, image, labels_map): - """Returns a sorted array of classification results.""" - set_input_tensor(interpreter, image) - interpreter.invoke() - output_details = interpreter.get_output_details()[0] - output = 
np.squeeze(interpreter.get_tensor(output_details['index'])) - - # If the model is quantized (uint8 data), then dequantize the results - if output_details['dtype'] == np.uint8: - scale, zero_point = output_details['quantization'] - output = scale * (output - zero_point) - - # for i,out in enumerate(output): - # print(labels_map[i],round(out,3)) - # print('> ',end=' ') - - # Sort the results - ordered = np.argpartition(-output, 1) - # Return the person with the highest score - return [(i, output[i]) for i in ordered[:1]] - - -results = [] -image = [] -elapsed_ms = 0 -run_flag = False - -def imgshow_fuc(input_height, input_width,labels): - - global results - global elapsed_ms - global image - global run_flag - - run_flag = True - - counter, fps = 0, 0 - start_time = time.time() - fps_avg_frame_count = 10 - - # open camera - cap = cv2.VideoCapture(0) - cap.set(3,CAMERA_WIDTH) - cap.set(4,CAMERA_HEIGHT) - print('start...') - - while cap.isOpened(): - - success,frame = cap.read() - if not success: - print("Ignoring empty camera frame.") - # If loading a video, use 'break' instead of 'continue'. 
- continue - - - # frame = cv2.flip(frame, -1) # Flip camera vertically - image = cv2.resize(frame,(input_width,input_height)) - - counter += 1 - if counter % fps_avg_frame_count == 0: - end_time = time.time() - fps = fps_avg_frame_count / (end_time - start_time) - start_time = time.time() - - if len(results) != 0: - label_id, prob = results[0] - cv2.putText(frame, - f"{labels[label_id]} {prob:.3f}", # text - (CAMERA_WIDTH-120, 10), # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - 0.8, # font_scale - (0,255,255), # font_color - 1, # thickness - cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - cv2.putText(frame, '%.1fms' % (elapsed_ms), (CAMERA_WIDTH-120, 40),cv2.FONT_HERSHEY_PLAIN,1, (255, 255, 225), 1) - cv2.putText(frame, 'fps %s'%round(fps,1), (CAMERA_WIDTH-120, 20),cv2.FONT_HERSHEY_PLAIN,1,(255, 255, 225),1) - cv2.imshow('Detecting...', frame) - - if cv2.waitKey(1) & 0xFF == ord('q'): - break - if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit - break - if cv2.getWindowProperty('Detecting...',1) < 0: - break - - run_flag = False - cap.release() - cv2.destroyAllWindows() - -def main(): - # setting parameters of model and corresponding label - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - '--model', - help='File path of .tflite file.', - required=False, - default=default_model) - parser.add_argument( - '--labels', - help='File path of labels file.', - required=False, - default=default_labels) - args = parser.parse_args() - - # loading model and corresponding label - labels = load_labels(args.labels) - interpreter = Interpreter(args.model) - interpreter.allocate_tensors() - _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] - - imgshow_t = threading.Thread(target=imgshow_fuc,args=(input_height, input_width,labels)) - imgshow_t.start() - - global results - global elapsed_ms - global run_flag - - while True: - - if len(image) != 0: - start_time = 
time.monotonic() - results = __classify_image(interpreter, image,labels) - elapsed_ms = (time.monotonic() - start_time) * 1000 - label_id, prob = results[0] - print(labels[label_id], prob) - print(' ') - - if run_flag == False: - print('\nend...') - break - - time.sleep(0.01) - - -def classify_image(image, model=None, labels=None): - # loading model and corresponding label - if model is None: - model = default_model - if labels is None: - labels = default_labels - - if not os.path.exists(model): - print('incorrect model path ') - return image - if not os.path.exists(labels): - print('incorrect labels path ') - return image - labels = load_labels(labels) - interpreter = Interpreter(model) - interpreter.allocate_tensors() - _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] - - if len(image) != 0: - # resize - img = cv2.resize(image, (input_width, input_height)) - # classify - results = __classify_image(interpreter, img,labels) - label_id, prob = results[0] - # print(labels[label_id], prob) - - image_classification_obj_parameter['name'] = labels[label_id] - image_classification_obj_parameter['acc'] = prob - - # putText - cv2.putText(image, - f"{labels[label_id]} {prob:.3f}", # text - (10, 25), # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - 0.8, # font_scale - (0, 255, 255), # font_color - 1, # thickness - cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - - return image - -if __name__ == '__main__': - main() diff --git a/build/lib/vilib/mediapipe_object_detection.py b/build/lib/vilib/mediapipe_object_detection.py deleted file mode 100644 index abae0ce..0000000 --- a/build/lib/vilib/mediapipe_object_detection.py +++ /dev/null @@ -1,137 +0,0 @@ -# https://ai.google.dev/edge/mediapipe/solutions/vision/object_detector/python - -import cv2 -import numpy as np -import mediapipe as mp -import time - - -class MediapipeObjectDetection: - - # wget -q -O efficientdet.tflite -q 
https://storage.googleapis.com/mediapipe-models/object_detector/efficientdet_lite0/int8/1/efficientdet_lite0.tflite - DEFAULT_MODEL = '/opt/vilib/efficientdet_lite0.tflite' - - CAMERA_WIDTH = 640 - CAMERA_HEIGHT = 480 - - colors = [(0,255,255),(255,0,0),(0,255,64),(255,255,0), - (255,128,64),(128,128,255),(255,128,255),(255,128,128)] - - def __init__(self, - model:str=DEFAULT_MODEL, - max_results:int=10, - score_threshold:float=0.3, - width:int=CAMERA_WIDTH, - height:int=CAMERA_HEIGHT, - ): - """ - Args: - img: The input image. - max_results: Max number of detection results. - score_threshold: The score threshold of detection results. - model: Name of the TFLite object detection model. - width: The width of the frame captured from the camera. - height: The height of the frame captured from the camera. - """ - - # Initialize the object detection model - - BaseOptions = mp.tasks.BaseOptions - ObjectDetector = mp.tasks.vision.ObjectDetector - ObjectDetectorOptions = mp.tasks.vision.ObjectDetectorOptions - VisionRunningMode = mp.tasks.vision.RunningMode - - options = ObjectDetectorOptions( - base_options=BaseOptions(model_asset_path=model), - max_results=max_results, - score_threshold=score_threshold, - running_mode=VisionRunningMode.IMAGE) - - self.detector = ObjectDetector.create_from_options(options) - - - def detect(self, image): - rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_image) - result = self.detector.detect(mp_image) - return result - - def draw(self, image, detection_result, x_boom:int=1, y_boom:int=1): - for i, detection in enumerate(detection_result.detections): - # Draw bounding_box - bbox = detection.bounding_box - start_point = int(bbox.origin_x*x_boom), int(bbox.origin_y*y_boom) - end_point = int(bbox.origin_x*x_boom) + int(bbox.width*x_boom), int(bbox.origin_y*y_boom) + int(bbox.height*y_boom) - # Use the orange color for high visibility. 
- cv2.rectangle(image, - start_point, - end_point, - self.colors[i%7], - 2 - ) - - # Draw label and score - category = detection.categories[0] - category_name = category.category_name - probability = round(category.score, 2) - result_text = category_name + ' (' + str(probability) + ')' - text_location = (10 + int(bbox.origin_x*x_boom), - 18 + int(bbox.origin_y*y_boom)) - cv2.putText(image, - result_text, - text_location, - cv2.FONT_HERSHEY_DUPLEX, - 0.8, - self.colors[i%7], - 1, - cv2.LINE_AA) - - return image - - - -if __name__ == '__main__': - from picamera2 import MappedArray, Picamera2, Preview - import libcamera - import time - import cv2 - - picam2 = Picamera2() - config = picam2.create_preview_configuration( - main={"size": (640, 480), "format": "RGB888"}, - transform=libcamera.Transform(hflip=True, vflip=True) - ) - picam2.configure(config) - picam2.start() - - detector = MediapipeObjectDetection() - framecount = 0 - fps = 0.0 - start_time = time.time() - while True: - img = picam2.capture_array() - result = detector.detect(img) - img = detector.draw(img, result, x_boom=1, y_boom=1) - - # calculate fps - framecount += 1 - elapsed_time = float(time.time() - start_time) - if (elapsed_time > 1): - fps = round(framecount/elapsed_time, 1) - framecount = 0 - start_time = time.time() - - cv2.putText( - img, # image - f"FPS: {fps}", # text - (520, 20), # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - 0.6, # font_scale - (255, 255, 255), # font_color - 1, # thickness - cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - - cv2.imshow('image', img) - cv2.waitKey(1) - diff --git a/build/lib/vilib/objects_detection.py b/build/lib/vilib/objects_detection.py deleted file mode 100644 index 56f8618..0000000 --- a/build/lib/vilib/objects_detection.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import re -import time 
-import os - -import numpy as np -import cv2 -from PIL import Image -from tflite_runtime.interpreter import Interpreter -import threading - -from .utils import load_labels - -CAMERA_WIDTH = 640 -CAMERA_HEIGHT = 480 - -default_model = '/opt/vilib/detect.tflite' -default_labels = '/opt/vilib/coco_labels.txt' - -####################################################### -object_detection_list_parameter = [] - -def add_class_names(objects): - labels = load_labels(default_labels) - for object in objects: - object["class_name"] = labels[object['class_id']] - -def copy_list_into_list(source,destination): - destination.clear() - for i in source: - destination.append(i) -####################################################### - -def set_input_tensor(interpreter, image): - """Sets the input tensor.""" - tensor_index = interpreter.get_input_details()[0]['index'] - input_tensor = interpreter.tensor(tensor_index)()[0] - input_tensor[:, :] = image - - -def get_output_tensor(interpreter, index): - """Returns the output tensor at the given index.""" - output_details = interpreter.get_output_details()[index] - tensor = np.squeeze(interpreter.get_tensor(output_details['index'])) - return tensor - - -def __detect_objects(interpreter, image, threshold): - """Returns a list of detection results, each a dictionary of object info.""" - set_input_tensor(interpreter, image) - interpreter.invoke() - - # Get all output details - boxes = get_output_tensor(interpreter, 0) - classes = get_output_tensor(interpreter, 1) - scores = get_output_tensor(interpreter, 2) - count = int(get_output_tensor(interpreter, 3)) - - results = [] - for i in range(count): - if scores[i] >= threshold: - result = { - 'bounding_box': boxes[i], - 'class_id': classes[i], - 'score': scores[i] - } - results.append(result) - #global object_detection_list_parameter - # Allow programmer to access the results - copy_list_into_list(results,object_detection_list_parameter) - add_class_names(object_detection_list_parameter) - 
return results - - -colors = [(0,255,255),(255,0,0),(0,255,64),(255,255,0), - (255,128,64),(128,128,255),(255,128,255),(255,128,128)] - -def put_text(img,results,labels_map,width=CAMERA_WIDTH,height=CAMERA_HEIGHT): - for i,obj in enumerate(results): - # Convert the bounding box figures from relative coordinates - # to absolute coordinates based on the original resolution - ymin, xmin, ymax, xmax = obj['bounding_box'] - xmin = int(xmin * width) - xmax = int(xmax * width) - ymin = int(ymin * height) - ymax = int(ymax * height) - - cv2.rectangle(img,(xmin, ymin), (xmax, ymax),colors[i%7],2) - cv2.putText(img, - f"{labels_map[obj['class_id']]} {obj['score']:.2f}", - (xmin+6, ymin+18), - cv2.FONT_HERSHEY_PLAIN, #FONT_HERSHEY_DUPLEX - 1.2, - colors[i%7], - 1, - cv2.LINE_AA # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - # print('%s %.2f' % (labels_map[obj['class_id']], obj['score'])) - # print('\n') - - return img - -# For static images: -def detect_objects(image, model=None, labels=None, width=CAMERA_WIDTH, height=CAMERA_HEIGHT, threshold=0.4): - # loading model and corresponding label - if model is None: - model = default_model - if labels is None: - labels = default_labels - - if not os.path.exists(model): - print('incorrect model path ') - return image - if not os.path.exists(labels): - print('incorrect labels path ') - return image - labels = load_labels(labels) - interpreter = Interpreter(model) - interpreter.allocate_tensors() - _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] - - if len(image) != 0: - # resize - img = cv2.resize(image, (input_width, input_height)) - # classify - results = __detect_objects(interpreter, img, threshold) - # putText - image = put_text(image, results, labels, width, height) - - return image - - -# For webcam: -results = [] -image = [] -elapsed_ms = 0 -run_flag = False - -def imgshow_fuc(input_height, input_width,labels): - - global results - global elapsed_ms - global image - global run_flag - - 
run_flag = True - - counter, fps = 0, 0 - start_time = time.time() - fps_avg_frame_count = 10 - - # open camera - cap = cv2.VideoCapture(0) - cap.set(3,CAMERA_WIDTH) - cap.set(4,CAMERA_HEIGHT) - print('start...') - - while cap.isOpened(): - ret,frame = cap.read() - # frame = cv2.flip(frame, -1) # Flip camera vertically - image = cv2.resize(frame,(input_width,input_height)) - - counter += 1 - if counter % fps_avg_frame_count == 0: - end_time = time.time() - fps = fps_avg_frame_count / (end_time - start_time) - start_time = time.time() - - img = put_text(frame,results,labels) - cv2.putText(img, '%.1fms' % (elapsed_ms), (CAMERA_WIDTH-120, 40),cv2.FONT_HERSHEY_PLAIN,1, (255, 255, 225), 1) - cv2.putText(img, 'fps %s'%round(fps,1), (CAMERA_WIDTH-120, 20),cv2.FONT_HERSHEY_PLAIN,1,(255, 255, 225),1) - cv2.imshow('Detecting...', img) - - if cv2.waitKey(1) & 0xFF == ord('q'): - break - if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit - break - if cv2.getWindowProperty('Detecting...',1) < 0: - break - - run_flag = False - cap.release() - cv2.destroyAllWindows() - - -def main(): - # setting parameters of model and corresponding label - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - '--model', - help='File path of .tflite file.', - required=False, - default=default_model) - parser.add_argument( - '--labels', - help='File path of labels file.', - required=False, - default=default_labels) - parser.add_argument( - '--threshold', - help='Score threshold for detected objects.', - required=False, - type=float, - default=0.4) - args = parser.parse_args() - - # loading model and corresponding label - labels = load_labels(args.labels) - interpreter = Interpreter(args.model) - interpreter.allocate_tensors() - _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape'] - - imgshow_t = threading.Thread(target=imgshow_fuc,args=(input_height, input_width,labels)) - imgshow_t.start() - - global 
results - global elapsed_ms - global run_flag - - while True: - - if len(image) != 0: - start_time = time.monotonic() - results = __detect_objects(interpreter, image,args.threshold) - elapsed_ms = (time.monotonic() - start_time) * 1000 - # print(results) - - if run_flag == False: - print('\nend...') - break - - -if __name__ == '__main__': - main() - - diff --git a/build/lib/vilib/pose_detection.py b/build/lib/vilib/pose_detection.py deleted file mode 100644 index 6a7252a..0000000 --- a/build/lib/vilib/pose_detection.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -import cv2 -import mediapipe as mp -from ast import literal_eval - -mp_drawing = mp.solutions.drawing_utils -# mp_drawing_styles = mp.solutions.drawing_styles -mp_pose = mp.solutions.pose - -class DetectPose(): - def __init__(self): - self.pose = mp_pose.Pose(min_detection_confidence=0.5, - min_tracking_confidence=0.5) - - def work(self,image): - joints = [] - if len(image) != 0: - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - results = self.pose.process(image) - - # Draw the pose annotation on the image. 
- image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - mp_drawing.draw_landmarks( - image, - results.pose_landmarks, - mp_pose.POSE_CONNECTIONS,) - # landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()) - - joints = str(results.pose_landmarks).replace('\n','').replace(' ','').replace('landmark',',').replace(',','',1) - joints = '['+joints.replace('{x:','[').replace('y:',',').replace('z:',',').replace('visibilit','').replace('}',']')+']' - try: - joints = literal_eval(joints) - except Exception as e: - raise(e) - return image,joints diff --git a/build/lib/vilib/qrcode_recognition.py b/build/lib/vilib/qrcode_recognition.py deleted file mode 100644 index e5a82dc..0000000 --- a/build/lib/vilib/qrcode_recognition.py +++ /dev/null @@ -1,90 +0,0 @@ -import cv2 -from pyzbar import pyzbar -from PIL import Image, ImageDraw, ImageFont -import numpy as np - -'''Define parameters for qrcode recognition object''' -qrcode_obj_parameter = {} -qrcode_obj_parameter['x'] = 0 # the largest block center x-axis coordinate -qrcode_obj_parameter['y'] = 0 # the largest block center y-axis coordinate -qrcode_obj_parameter['w'] = 0 # the largest block pixel width -qrcode_obj_parameter['h'] = 0 # the largest block pixel height -qrcode_obj_parameter['data'] = "None" # recognition result -qrcode_obj_parameter['list'] = [] - -FONT_PATH = "/opt/vilib/Arial-Unicode-Regular.ttf" -FONT_SIZE = 16 -font = None - -def qrcode_recognize(img, border_rgb=(255, 0, 0), font_color=(0, 0, 255)): - global font - - # Detect and decode QR codes - barcodes = pyzbar.decode(img) - - qrcode_obj_parameter['list'].clear() - - if len(barcodes) > 0: - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = Image.fromarray(img) - draw = ImageDraw.Draw(img) - - if font is None: - font = ImageFont.truetype(FONT_PATH, FONT_SIZE, encoding="utf-8") - - for barcode in barcodes: - # Return the coordinate(top left), width and height of contour - (x, y, w, h) = barcode.rect - - # 
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2) - draw.rectangle([x, y, x+w, y+h], outline=border_rgb, width=2) - - # the barcode data is a byte object, converted into a string - barcodeData = barcode.data.decode("utf-8") - # barcodeType = barcode.type - text = f"{barcodeData}" - - # add the barcode to the list of barcodes for output - qrcode_obj_parameter['list'].append({ - 'text': text, - 'x': x, - 'y': y, - 'w': w, - 'h': h, - }) - - if len(text) > 0: - qrcode_obj_parameter['data'] = text - qrcode_obj_parameter['h'] = h - qrcode_obj_parameter['w'] = w - qrcode_obj_parameter['x'] = x - qrcode_obj_parameter['y'] = y - # cv2.putText( - # img, # image - # text, # text - # (x, y - 10), # origin - # cv2.FONT_HERSHEY_SIMPLEX, # font - # 0.5, # font_scale - # border_rgb, # font_color - # 1, # thickness - # cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - # ) - draw.text((x, y-FONT_SIZE-2), text, font_color, font=font) - else: - qrcode_obj_parameter['data'] = "None" - qrcode_obj_parameter['x'] = 0 - qrcode_obj_parameter['y'] = 0 - qrcode_obj_parameter['w'] = 0 - qrcode_obj_parameter['h'] = 0 - - img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) - - return img - else: - qrcode_obj_parameter['data'] = "None" - qrcode_obj_parameter['x'] = 0 - qrcode_obj_parameter['y'] = 0 - qrcode_obj_parameter['w'] = 0 - qrcode_obj_parameter['h'] = 0 - qrcode_obj_parameter['list'] = [] - return img diff --git a/build/lib/vilib/traffic_sign_detection.py b/build/lib/vilib/traffic_sign_detection.py deleted file mode 100644 index fdd3daf..0000000 --- a/build/lib/vilib/traffic_sign_detection.py +++ /dev/null @@ -1,341 +0,0 @@ -import cv2 -import numpy as np -import os -from tflite_runtime.interpreter import Interpreter -from .utils import load_labels -# https://docs.sunfounder.com/projects/picar-x-v20/en/latest/ezblock/ezblock_traffic.html -# https://github.com/sunfounder/sf-pdf/raw/master/prop_card/object_detection/traffic-sign-cards.pdf - -'''Define parameters for 
traffic sign detection object''' -traffic_sign_obj_parameter = {} -traffic_sign_obj_parameter['x'] = 0 # Maximum traffic sign block center x-axis coordinate -traffic_sign_obj_parameter['y'] = 0 # Maximum traffic sign block center y-axis coordinate -traffic_sign_obj_parameter['w'] = 0 # Maximum face block pixel width -traffic_sign_obj_parameter['h'] = 0 # Maximum face block pixel height -traffic_sign_obj_parameter['t'] = 'none' # traffic sign text, could be: 'none', 'stop','right','left','forward' -traffic_sign_obj_parameter['acc'] = 0 # accuracy - -'''Default model and labels''' -traffic_sign_model_path = "/opt/vilib/traffic_sign_150_dr0.2.tflite" # default model path -traffic_sign_labels_path = '/opt/vilib/traffic_sign_150_dr0.2_labels.txt' # default model path - - -def traffic_sign_predict(interpreter, img): - ''' - Traffic sign predict type - - :param img: The detected image data - :type img: list - :param img: The detected image data - :type img: list - :returns: The confidence value and index of type - :rtype: tuple (confidence:float, type:str) - ''' - _, model_width, model_height, model_depth = interpreter.get_input_details()[0]['shape'] - if model_depth != 3 and model_depth != 1: - raise ValueError('Unsupported model depth') - - # resize the image according to the model size - resize_img = cv2.resize(img, (model_width, model_height), interpolation=cv2.INTER_LINEAR) - - - flatten_img = np.reshape(resize_img, (model_width, model_height, model_depth)) - im5 = np.expand_dims(flatten_img, axis = 0) - img_np_expanded = im5.astype('float32') - - # Perform the actual detection by running the model with the image as input - tensor_index = interpreter.get_input_details()[0]['index'] - interpreter.set_tensor(tensor_index, img_np_expanded) - interpreter.invoke() - - output_details = interpreter.get_output_details()[0] - output_data = interpreter.get_tensor(output_details['index']) - - result = np.squeeze(output_data) - accuracy = round(np.max(result), 2) - type_idnex = 
np.where(result==np.max(result))[0][0] - - return accuracy, type_idnex - -def cnt_area(cnt): - # Return the coordinate(top left), width and height of contour - x, y, w, h = cv2.boundingRect(cnt) - return w*h - - -def traffic_sign_detect(img, model=None, labels=None, border_rgb=(255, 0, 0)): - ''' - Traffic sign detection - - :param img: The detected image data - :type img: list - :param model: The tflite model file path, if 'None' use default path - :type model: str - :param labels: The labels file path, if 'None' use default path - :type labels: str - :param border_rgb: The color (RGB, tuple) of border. Eg: (255, 0, 0). - :type color_name: tuple - :returns: The image returned after detection - :rtype: Binary list - ''' - # border_rgb to border_bgr - r, g, b = border_rgb - border_bgr = (b, g, r) - - # loading model and corresponding label - if model == None: - model = traffic_sign_model_path - if labels == None: - labels = traffic_sign_labels_path - - if not os.path.exists(model): - raise('incorrect model path ') - return img - if not os.path.exists(labels): - raise('incorrect labels path ') - return img - - labels = load_labels(labels) - interpreter = Interpreter(model) - interpreter.allocate_tensors() - - # _, model_height, model_width, _ = interpreter.get_input_details()[0]['shape'] - # print('get_input_details', interpreter.get_input_details()[0]['shape'] ) - - # get img shape - width, height, depth = np.shape(img) - - # Convert the image in BGR to HSV - hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) - - # Set range for red color and define mask - mask_red_1 = cv2.inRange(hsv, (157, 20, 20), (180, 255, 255)) - mask_red_2 = cv2.inRange(hsv, (0, 20, 20), (10, 255, 255)) - - # Set range for blue color and define mask - # mask_blue = cv2.inRange(hsv, (102, 50, 50), (125, 255, 255)) - mask_blue = cv2.inRange(hsv, (92, 10, 10), (125, 255, 255)) - - ### all - mask_all = cv2.bitwise_or(mask_red_1, mask_blue) - mask_all = cv2.bitwise_or(mask_red_2, mask_all) - - # define a 
5*5 kernel - kernel_5 = np.ones((5, 5), np.uint8) - - # opening the image (erosion followed by dilation), to remove the image noise - open_img = cv2.morphologyEx(mask_all, cv2.MORPH_OPEN, kernel_5, iterations=1) - # cv2.imshow('open_img', open_img) - - # Find contours in binary image - _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, contours, hierarchy = _tuple - else: - contours, hierarchy = _tuple - - # Sort contours by area from smallest to largest - contours = sorted(contours, key=cnt_area, reverse=False) - - contours_num = len(contours) - traffic_sign_num = 0 - if contours_num > 0: - # Iterate over all contours - max_area = 0 - for i in contours: - # Return the coordinate(top left), width and height of contour - x, y, w, h = cv2.boundingRect(i) - - if w > 32 and h > 32: - - # Convert img to gray, if grayscale model - model_depth = interpreter.get_input_details()[0]['shape'][3] - if model_depth == 1: - img_possible_part = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - else: - img_possible_part = img - - # Cut out the contour image - x1 = int(x) - x2 = int(x + w) - y1 = int(y) - y2 = int(y + h) - x1 = x1-8 - x1 = -x1 if x1 < 0 else x1 - x2 = x2+8 - y1 = y1-8 - y1 = -y1 if y1 < 0 else y1 - y2 = y2+8 - img_possible_part = img_possible_part[y1:y2, x1:x2] - img_possible_part = (img_possible_part / 255.0) - img_possible_part = (img_possible_part - 0.5) * 2.0 - - # cv2.imshow('img_possible_part', img_possible_part) - - # predict traffic sign type - acc_val, traffic_type = traffic_sign_predict(interpreter, img_possible_part) - # Convert confidence to percentage - acc_val = round(acc_val*100) - traffic_type = labels[traffic_type] - - if acc_val >= 85: - # print(traffic_type, acc_val) - - # If it is a forward, turn left or right traffic sign, outline a circle - if traffic_type == 'left' or \ - traffic_type == 'right' or \ - traffic_type == 'forward': - - # Convert to grayscale 
image and detect circle - simple_gray = cv2.cvtColor(img[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY) - - circles = cv2.HoughCircles( - simple_gray, - cv2.HOUGH_GRADIENT, 1, 32, - param1=140, - param2=70, - minRadius=int(w/4.0), - maxRadius=max(w,h) - ) - # print(f'{circles}: circles') - - # Draw a circle outline, add text description - if circles is not None: - # Iterate over all circles and find the circle with the largest radius - max_radius = 0 - max_circle_index = 0 - max_circle = None - for circle in circles[0,:]: - # circle[center_xpos, center_ypos, radius] - if circle[2] > max_radius: - max_radius = circle[2] - max_circle = circle - traffic_sign_coor = (int(x+max_circle[0]),int(y+max_circle[1])) - cv2.circle(img, traffic_sign_coor, int(max_circle[2]), border_bgr, 2) - cv2.putText(img, - f"{traffic_type}:{acc_val:.1f}", - (int(x+max_circle[0]-max_circle[2]), int(y+max_circle[1]-max_circle[2]-5)), - cv2.FONT_HERSHEY_SIMPLEX, - 0.6, # font size - border_bgr, # color - 1, # thickness - cv2.LINE_AA, - ) - if w * h > max_area: - max_area = w * h - max_obj_x = x - max_obj_y = y - max_obj_w = w - max_obj_h = h - max_obj_t = traffic_type - max_obj_acc = acc_val - traffic_sign_num += 1 - - # If it is a STOP traffic sign, outline a rectangle - elif traffic_type == 'stop': - red_mask_1 = cv2.inRange(hsv[y:y+h,x:x+w],(0,50,20), (4,255,255)) # 3.inRange():介于lower/upper之间的为白色,其余黑色 - red_mask_2 = cv2.inRange(hsv[y:y+h,x:x+w],(163,50,20), (180,255,255)) - red_mask_all = cv2.bitwise_or(red_mask_1,red_mask_2) - - open_img = cv2.morphologyEx(red_mask_all, cv2.MORPH_OPEN,kernel_5,iterations=1) #开运算 - open_img = cv2.dilate(open_img, kernel_5,iterations=5) - # Find contours in binary image - _tuple = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, blue_contours, hierarchy = _tuple - else: - blue_contours, hierarchy = _tuple - - contours_count = len(blue_contours) - if contours_count >=1: - 
blue_contours = sorted(blue_contours,key = cnt_area, reverse=True) - - epsilon = 0.025 * cv2.arcLength(blue_contours[0], True) - approx = cv2.approxPolyDP(blue_contours[0], epsilon, True) - corners = len(approx) - if corners >= 0: - traffic_sign_coor = (int(x+w/2),int(y+h/2)) - cv2.rectangle(img, (x,y), (x+w,y+h), border_bgr, 2) - cv2.putText(img, - f"{traffic_type}:{acc_val:.1f}", - (x, y-5), - cv2.FONT_HERSHEY_SIMPLEX, - 0.6, # font size - border_bgr, # color - 1, # thickness - cv2.LINE_AA, - ) - if w * h > max_area: - max_area = w * h - max_obj_x = x - max_obj_y = y - max_obj_w = w - max_obj_h = h - max_obj_t = traffic_type - max_obj_acc = acc_val - traffic_sign_num += 1 - - if traffic_sign_num > 0: - traffic_sign_obj_parameter['x'] = int(max_obj_x + max_obj_w/2) - traffic_sign_obj_parameter['y'] = int(max_obj_y + max_obj_h/2) - traffic_sign_obj_parameter['w'] = max_obj_w - traffic_sign_obj_parameter['h'] = max_obj_h - traffic_sign_obj_parameter['t'] = max_obj_t - traffic_sign_obj_parameter['acc'] = max_obj_acc - - if traffic_sign_num <= 0: - traffic_sign_obj_parameter['x'] = 0 - traffic_sign_obj_parameter['y'] = 0 - traffic_sign_obj_parameter['w'] = 0 - traffic_sign_obj_parameter['h'] = 0 - traffic_sign_obj_parameter['t'] = 'none' - traffic_sign_obj_parameter['acc'] = 0 - - # print(f'traffic_sign_num {traffic_sign_num}') - return img - - -# Test -def test(): - print("traffic sign detection ...") - - from picamera2 import Picamera2 - import libcamera - - picam2 = Picamera2() - preview_config = picam2.preview_configuration - # preview_config.size = (800, 600) - preview_config.size = (640, 480) - preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 'YUV420' - preview_config.transform = libcamera.Transform(hflip=False, - vflip=False) - preview_config.colour_space = libcamera.ColorSpace.Sycc() - preview_config.buffer_count = 4 - preview_config.queue = True - - picam2.start() - - while True: - frame = picam2.capture_array() - - # frame = 
cv2.flip(frame, 0) # Flip camera horizontally - # frame = cv2.flip(frame, 1) # Flip camera vertically - # frame = cv2.flip(frame, -1) # Flip camera vertically & horizontally - - out_img = traffic_sign_detect(frame, border_rgb=(255, 255, 0)) - - cv2.imshow('Traffic sign detecting ...', out_img) - - if cv2.waitKey(1) & 0xFF == ord('q'): - break - if cv2.waitKey(1) & 0xff == 27: # press 'ESC' to quit - break - if cv2.getWindowProperty('Traffic sign detecting ...', 1) < 0: - break - - cv2.destroyAllWindows() - -if __name__ == "__main__": - test() diff --git a/build/lib/vilib/utils.py b/build/lib/vilib/utils.py deleted file mode 100644 index 560cc2f..0000000 --- a/build/lib/vilib/utils.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -# utils -# ================================================================= -def run_command(cmd): - import subprocess - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - result = p.stdout.read().decode('utf-8') - status = p.poll() - return status, result - -def getIP(): - wlan0 = os.popen("ifconfig wlan0 |awk '/inet/'|awk 'NR==1 {print $2}'").readline().strip('\n') - eth0 = os.popen("ifconfig eth0 |awk '/inet/'|awk 'NR==1 {print $2}'").readline().strip('\n') - - if wlan0 == '': - wlan0 = None - if eth0 == '': - eth0 = None - - return wlan0,eth0 - -def check_machine_type(): - import platform - machine_type = platform.machine() - if machine_type == "armv7l": - return 32, machine_type - elif machine_type == "aarch64": - return 64, machine_type - else: - raise ValueError(f"[{machine_type}] not supported") - -def load_labels(path): - """Loads the labels file. 
Supports files with or without index numbers.""" - import re - - with open(path, 'r', encoding='utf-8') as f: - lines = f.readlines() - labels = {} - for row_number, content in enumerate(lines): - pair = re.split(r'[:\s]+', content.strip(), maxsplit=1) - if len(pair) == 2 and pair[0].strip().isdigit(): - labels[int(pair[0])] = pair[1].strip() - else: - labels[row_number] = pair[0].strip() - return labels \ No newline at end of file diff --git a/build/lib/vilib/version.py b/build/lib/vilib/version.py deleted file mode 100644 index 50d85c8..0000000 --- a/build/lib/vilib/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.3.18" diff --git a/build/lib/vilib/vilib.py b/build/lib/vilib/vilib.py deleted file mode 100644 index 6008f22..0000000 --- a/build/lib/vilib/vilib.py +++ /dev/null @@ -1,856 +0,0 @@ -#!/usr/bin/env python3 - -# whther print welcome message -import os -import logging - -from .version import __version__ -if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ - 'False', '0' -]: - from pkg_resources import require - picamera2_version = require('picamera2')[0].version - print(f'vilib {__version__} launching ...') - print(f'picamera2 {picamera2_version}') - -# set libcamera2 log level -os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' -from picamera2 import Picamera2 -import libcamera - -import cv2 -import numpy as np -from PIL import Image, ImageDraw, ImageFont - -from flask import Flask, render_template, Response - -import time -import datetime -import threading -from multiprocessing import Process, Manager - -from .utils import * - -# user and user home directory -# ================================================================= -user = os.popen("echo ${SUDO_USER:-$(who -m | awk '{ print $1 }')}").readline().strip() -user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() -# print(f"user: {user}") -# print(f"user_home: {user_home}") - -# Default path for pictures and videos -DEFAULLT_PICTURES_PATH = 
'%s/Pictures/vilib/'%user_home -DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home - -# utils -# ================================================================= -def findContours(img): - _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, contours, hierarchy = _tuple - else: - contours, hierarchy = _tuple - return contours, hierarchy - -# flask -# ================================================================= -os.environ['FLASK_DEBUG'] = 'development' -app = Flask(__name__) - -log = logging.getLogger('werkzeug') -log.setLevel(logging.ERROR) - -@app.route('/') -def index(): - """Video streaming home page.""" - return render_template('index.html') - -def get_frame(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_qrcode_pictrue(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_png_frame(): - return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() - -def get_qrcode(): - while Vilib.qrcode_img_encode is None: - time.sleep(0.2) - - return Vilib.qrcode_img_encode - -def gen(): - """Video streaming generator function.""" - while True: - # start_time = time.time() - frame = get_frame() - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') - time.sleep(0.03) - # end_time = time.time() - start_time - # print('flask fps:%s'%int(1/end_time)) - -@app.route('/mjpg') ## video -def video_feed(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - if Vilib.web_display_flag: - response = Response(gen(), - mimetype='multipart/x-mixed-replace; boundary=frame') - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -@app.route('/mjpg.jpg') # jpg -def video_feed_jpg(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_frame(), mimetype="image/jpeg") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route('/mjpg.png') # png -def video_feed_png(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_png_frame(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route("/qrcode") -def qrcode_feed(): - qrcode_html = ''' - - - - QRcode - - - - QR Code - - -''' - return Response(qrcode_html, mimetype='text/html') - - -@app.route("/qrcode.png") -def qrcode_feed_png(): - """Video streaming route. Put this in the src attribute of an img tag.""" - if Vilib.web_qrcode_flag: - # response = Response(get_qrcode(), - # mimetype='multipart/x-mixed-replace; boundary=frame') - response = Response(get_qrcode(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display_qrcode(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -def web_camera_start(): - try: - Vilib.flask_start = True - app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) - except Exception as e: - print(e) - -# Vilib -# ================================================================= -class Vilib(object): - - picam2 = Picamera2() - - camera_size = (640, 480) - camera_width = 640 - camera_height = 480 - camera_vflip = False - camera_hflip = False - camera_run = False - - flask_thread = None - camera_thread = None - flask_start = False - - qrcode_display_thread = None - qrcode_making_completed = False - qrcode_img = Manager().list(range(1)) - qrcode_img_encode = None - qrcode_win_name = 'qrcode' - - img = Manager().list(range(1)) - flask_img = Manager().list(range(1)) - - Windows_Name = "picamera" - imshow_flag = False - web_display_flag = False - imshow_qrcode_flag = False - web_qrcode_flag = False - - draw_fps = False - fps_origin = (camera_width-105, 20) - fps_size = 0.6 - fps_color = (255, 255, 255) - - detect_obj_parameter = {} - color_detect_color = None - face_detect_sw = False - hands_detect_sw = False - pose_detect_sw = False - image_classify_sw = False - image_classification_model = None - image_classification_labels = None - objects_detect_sw = False - objects_detection_model = None - objects_detection_labels = None - qrcode_detect_sw = False - traffic_detect_sw = False - - @staticmethod - def get_instance(): - return Vilib.picam2 - - @staticmethod - def set_controls(controls): - Vilib.picam2.set_controls(controls) - - @staticmethod - def get_controls(): - return Vilib.picam2.capture_metadata() - - @staticmethod - def camera(): - Vilib.camera_width = Vilib.camera_size[0] - Vilib.camera_height = Vilib.camera_size[1] - - picam2 = Vilib.picam2 - - # Ensure we have a fresh configuration - try: - preview_config = picam2.preview_configuration - if preview_config is None: - # Create new configuration if needed - config = picam2.create_preview_configuration() 
- picam2.configure(config) - preview_config = picam2.preview_configuration - except Exception as e: - print(f"Error getting preview configuration: {e}") - # Try to create a new configuration - config = picam2.create_preview_configuration() - picam2.configure(config) - preview_config = picam2.preview_configuration - - # preview_config.size = (800, 600) - if preview_config is not None: - preview_config.size = Vilib.camera_size - preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 'YUV420' - preview_config.transform = libcamera.Transform( - hflip=Vilib.camera_hflip, - vflip=Vilib.camera_vflip - ) - preview_config.colour_space = libcamera.ColorSpace.Sycc() - preview_config.buffer_count = 4 - preview_config.queue = True - # preview_config.raw = {'size': (2304, 1296)} - preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time - - try: - picam2.start() - except Exception as e: - print(f"\033[38;5;1mError:\033[0m\n{e}") - print("\nPlease check whether the camera is connected well" +\ - "You can use the \"libcamea-hello\" command to test the camera" - ) - exit(1) - Vilib.camera_run = True - Vilib.fps_origin = (Vilib.camera_width-105, 20) - fps = 0 - start_time = 0 - framecount = 0 - try: - start_time = time.time() - while True: - # ----------- extract image data ---------------- - # st = time.time() - Vilib.img = picam2.capture_array() - # print(f'picam2.capture_array(): {time.time() - st:.6f}') - # st = time.time() - - # ----------- image gains and effects ---------------- - - # ----------- image detection and recognition ---------------- - Vilib.img = Vilib.color_detect_func(Vilib.img) - Vilib.img = Vilib.face_detect_func(Vilib.img) - Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) - Vilib.img = Vilib.qrcode_detect_func(Vilib.img) - - Vilib.img = Vilib.image_classify_fuc(Vilib.img) - Vilib.img = Vilib.object_detect_fuc(Vilib.img) - Vilib.img = Vilib.hands_detect_fuc(Vilib.img) - Vilib.img = 
Vilib.pose_detect_fuc(Vilib.img) - - # ----------- calculate fps and draw fps ---------------- - # calculate fps - framecount += 1 - elapsed_time = float(time.time() - start_time) - if (elapsed_time > 1): - fps = round(framecount/elapsed_time, 1) - framecount = 0 - start_time = time.time() - - # print(f"elapsed_time: {elapsed_time}, fps: {fps}") - - # draw fps - if Vilib.draw_fps: - cv2.putText( - # img, # image - Vilib.img, - f"FPS: {fps}", # text - Vilib.fps_origin, # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - Vilib.fps_size, # font_scale - Vilib.fps_color, # font_color - 1, # thickness - cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - - # ---- copy img for flask --- - # st = time.time() - Vilib.flask_img = Vilib.img - # print(f'vilib.flask_img: {time.time() - st:.6f}') - - # ----------- display on desktop ---------------- - if Vilib.imshow_flag == True: - try: - try: - prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) - qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) - if prop < 1 or qrcode_prop < 1: - break - except: - pass - - cv2.imshow(Vilib.Windows_Name, Vilib.img) - - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: - Vilib.qrcode_making_completed = False - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - - cv2.waitKey(1) - - except Exception as e: - Vilib.imshow_flag = False - print(f"imshow failed:\n {e}") - break - - # ----------- exit ---------------- - if Vilib.camera_run == False: - break - - # print(f'loop end: {time.time() - st:.6f}') - - except KeyboardInterrupt as e: - print(e) - finally: - picam2.close() - cv2.destroyAllWindows() - - @staticmethod - def camera_start(vflip=False, hflip=False, size=None): - if size is not None: - Vilib.camera_size = size - Vilib.camera_hflip = hflip - Vilib.camera_vflip = vflip - Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") - Vilib.camera_thread.daemon = False - Vilib.camera_thread.start() - 
while not Vilib.camera_run: - time.sleep(0.1) - - @staticmethod - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.2) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=3.0) - - # Properly close and reinitialize Picamera2 - try: - if Vilib.picam2 is not None: - Vilib.picam2.close() - time.sleep(0.2) - - # Recreate Picamera2 object completely fresh - Vilib.picam2 = Picamera2() - - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object - try: - Vilib.picam2 = Picamera2() - except Exception as e2: - print(f"Failed to reinitialize camera: {e2}") - - # Reset thread reference - Vilib.camera_thread = None - - @staticmethod - def display(local=True, web=True): - # cheack camera thread is_alive - if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_flag = True - print("Imgshow start ...") - else: - Vilib.imshow_flag = False - print("Local display failed, because there is no gui.") - # web video - if web == True: - Vilib.web_display_flag = True - print("\nWeb display on:") - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/mjpg") - if eth0 != None: - print(f" http://{eth0}:9000/mjpg") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - else: - print('Error: Please execute < camera_start() > first.') - - @staticmethod - def show_fps(color=None, fps_size=None, fps_origin=None): - if color is not None: - Vilib.fps_color = color - if fps_size is not None: - Vilib.fps_size = fps_size - if fps_origin is not None: - 
Vilib.fps_origin = fps_origin - - Vilib.draw_fps = True - - @staticmethod - def hide_fps(): - Vilib.draw_fps = False - - # take photo - # ================================================================= - @staticmethod - def take_photo(photo_name, path=DEFAULLT_PICTURES_PATH): - # ----- check path ----- - if not os.path.exists(path): - # print('Path does not exist. Creating path now ... ') - os.makedirs(name=path, mode=0o751, exist_ok=True) - time.sleep(0.01) - # ----- save photo ----- - status = False - for _ in range(5): - if Vilib.img is not None: - status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) - break - else: - time.sleep(0.01) - else: - status = False - - # if status: - # print('The photo is saved as '+path+'/'+photo_name+'.jpg') - # else: - # print('Photo save failed .. ') - - return status - - - # record video - # ================================================================= - rec_video_set = {} - - rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') - #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") - - rec_video_set["fps"] = 30.0 - rec_video_set["framesize"] = (640, 480) - rec_video_set["isColor"] = True - - rec_video_set["name"] = "default" - rec_video_set["path"] = DEFAULLT_VIDEOS_PATH - - rec_video_set["start_flag"] = False - rec_video_set["stop_flag"] = False - - rec_thread = None - - @staticmethod - def rec_video_work(): - if not os.path.exists(Vilib.rec_video_set["path"]): - # print('Path does not exist. Creating path now ... 
') - os.makedirs(name=Vilib.rec_video_set["path"], - mode=0o751, - exist_ok=True - ) - time.sleep(0.01) - video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', - Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], - Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) - - while True: - if Vilib.rec_video_set["start_flag"] == True: - # video_out.write(Vilib.img_array[0]) - video_out.write(Vilib.img) - if Vilib.rec_video_set["stop_flag"] == True: - video_out.release() # note need to release the video writer - Vilib.rec_video_set["start_flag"] == False - break - - @staticmethod - def rec_video_run(): - if Vilib.rec_thread != None: - Vilib.rec_video_stop() - Vilib.rec_video_set["stop_flag"] = False - Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) - Vilib.rec_thread.daemon = True - Vilib.rec_thread.start() - - @staticmethod - def rec_video_start(): - Vilib.rec_video_set["start_flag"] = True - Vilib.rec_video_set["stop_flag"] = False - - @staticmethod - def rec_video_pause(): - Vilib.rec_video_set["start_flag"] = False - - @staticmethod - def rec_video_stop(): - Vilib.rec_video_set["start_flag"] == False - Vilib.rec_video_set["stop_flag"] = True - if Vilib.rec_thread != None: - Vilib.rec_thread.join(3) - Vilib.rec_thread = None - - # color detection - # ================================================================= - @staticmethod - def color_detect(color="red"): - ''' - :param color: could be red, green, blue, yellow , orange, purple - ''' - Vilib.color_detect_color = color - from .color_detection import color_detect_work, color_obj_parameter - Vilib.color_detect_work = color_detect_work - Vilib.color_obj_parameter = color_obj_parameter - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - 
Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - - @staticmethod - def color_detect_func(img): - if Vilib.color_detect_color is not None \ - and Vilib.color_detect_color != 'close' \ - and hasattr(Vilib, "color_detect_work"): - img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - return img - - @staticmethod - def close_color_detection(): - Vilib.color_detect_color = None - - # face detection - # ================================================================= - @staticmethod - def face_detect_switch(flag=False): - Vilib.face_detect_sw = flag - if Vilib.face_detect_sw: - from .face_detection import face_detect, set_face_detection_model, face_obj_parameter - Vilib.face_detect_work = face_detect - Vilib.set_face_detection_model = set_face_detection_model - Vilib.face_obj_parameter = face_obj_parameter - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - - @staticmethod - def face_detect_func(img): - if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): - img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = 
Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - return img - - # traffic sign detection - # ================================================================= - @staticmethod - def traffic_detect_switch(flag=False): - Vilib.traffic_detect_sw = flag - if Vilib.traffic_detect_sw: - from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter - Vilib.traffic_detect_work = traffic_sign_detect - Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - - @staticmethod - def traffic_detect_fuc(img): - if Vilib.traffic_detect_sw and hasattr(Vilib, "traffic_detect_work"): - img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - return img - - # qrcode recognition - # 
================================================================= - @staticmethod - def qrcode_detect_switch(flag=False): - Vilib.qrcode_detect_sw = flag - if Vilib.qrcode_detect_sw: - from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter - Vilib.qrcode_recognize = qrcode_recognize - Vilib.qrcode_obj_parameter = qrcode_obj_parameter - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] - - @staticmethod - def qrcode_detect_func(img): - if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): - img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - return img - - # qrcode making - # ================================================================= - @staticmethod - def make_qrcode(data, - path=None, - version=1, - box_size=10, - border=4, - fill_color=(132, 112, 255), - back_color=(255, 255, 255) - ): - import qrcode # https://github.com/lincolnloop/python-qrcode - - qr = qrcode.QRCode( - version=version, - error_correction=qrcode.constants.ERROR_CORRECT_L, - box_size=box_size, - border=border, - ) - qr.add_data(data) - qr.make(fit=True) - qr_pil = qr.make_image(fill_color=fill_color, - back_color=back_color) - if path != None: - qr_pil.save(path) - - Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), 
cv2.COLOR_RGB2BGR) - Vilib.qrcode_making_completed = True - - if Vilib.web_qrcode_flag: - Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() - - - - @staticmethod - def display_qrcode_work(): - while True: - if Vilib.imshow_flag: - time.sleep(0.1) - continue - - # ----------- display qrcode on desktop ---------------- - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : - Vilib.qrcode_making_completed = False - try: - if len(Vilib.qrcode_img) > 10: - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - cv2.waitKey(1) - if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: - cv2.destroyWindow(Vilib.qrcode_win_name) - except Exception as e: - Vilib.imshow_qrcode_flag = False - print(f"imshow qrcode failed:\n {e}") - break - time.sleep(0.1) - - @staticmethod - def display_qrcode(local=True, web=True): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_qrcode_flag = True - print("Imgshow qrcode start ...") - else: - Vilib.imshow_qrcode_flag = False - print("Local display failed, because there is no gui.") - # web video - if web == True: - Vilib.web_qrcode_flag = True - print(f'QRcode display on:') - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/qrcode") - if eth0 != None: - print(f" http://{eth0}:9000/qrcode") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - - if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: - Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) - Vilib.qrcode_display_thread.daemon = True - Vilib.qrcode_display_thread.start() - - - # image classification - # 
================================================================= - @staticmethod - def image_classify_switch(flag=False): - from .image_classification import image_classification_obj_parameter - Vilib.image_classify_sw = flag - Vilib.image_classification_obj_parameter = image_classification_obj_parameter - - @staticmethod - def image_classify_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.image_classification_model = path - - @staticmethod - def image_classify_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.image_classification_labels = path - - @staticmethod - def image_classify_fuc(img): - if Vilib.image_classify_sw == True: - # print('classify_image starting') - from .image_classification import classify_image - img = classify_image(image=img, - model=Vilib.image_classification_model, - labels=Vilib.image_classification_labels) - return img - - # objects detection - # ================================================================= - @staticmethod - def object_detect_switch(flag=False): - Vilib.objects_detect_sw = flag - if Vilib.objects_detect_sw == True: - from .objects_detection import object_detection_list_parameter - Vilib.object_detection_list_parameter = object_detection_list_parameter - - @staticmethod - def object_detect_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.objects_detection_model = path - - @staticmethod - def object_detect_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.objects_detection_labels = path - - @staticmethod - def object_detect_fuc(img): - if Vilib.objects_detect_sw == True: - # print('detect_objects starting') - from .objects_detection import detect_objects - img = detect_objects(image=img, - model=Vilib.objects_detection_model, - labels=Vilib.objects_detection_labels) - return img - - # hands detection - # 
================================================================= - @staticmethod - def hands_detect_switch(flag=False): - from .hands_detection import DetectHands - Vilib.detect_hands = DetectHands() - Vilib.hands_detect_sw = flag - - @staticmethod - def hands_detect_fuc(img): - if Vilib.hands_detect_sw == True: - img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) - return img - - # pose detection - # ================================================================= - @staticmethod - def pose_detect_switch(flag=False): - from .pose_detection import DetectPose - Vilib.pose_detect = DetectPose() - Vilib.pose_detect_sw = flag - - @staticmethod - def pose_detect_fuc(img): - if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): - img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) - return img diff --git a/build/lib/vilib/vilib_debug.py b/build/lib/vilib/vilib_debug.py deleted file mode 100644 index f209b82..0000000 --- a/build/lib/vilib/vilib_debug.py +++ /dev/null @@ -1,846 +0,0 @@ -#!/usr/bin/env python3 - -# whther print welcome message -import os -import logging - -from .version import __version__ -if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ - 'False', '0' -]: - from pkg_resources import require - picamera2_version = require('picamera2')[0].version - print(f'vilib {__version__} launching ...') - print(f'picamera2 {picamera2_version}') - -# set libcamera2 log level -os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' -from picamera2 import Picamera2 -import libcamera - -import cv2 -import numpy as np -from PIL import Image, ImageDraw, ImageFont - -from flask import Flask, render_template, Response - -import time -import datetime -import threading -from multiprocessing import Process, Manager - -from .utils import * - -# user and user home directory -# ================================================================= -user = os.popen("echo ${SUDO_USER:-$(who -m 
| awk '{ print $1 }')}").readline().strip() -user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() -# print(f"user: {user}") -# print(f"user_home: {user_home}") - -# Default path for pictures and videos -DEFAULLT_PICTURES_PATH = '%s/Pictures/vilib/'%user_home -DEFAULLT_VIDEOS_PATH = '%s/Videos/vilib/'%user_home - -# utils -# ================================================================= -def findContours(img): - _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, contours, hierarchy = _tuple - else: - contours, hierarchy = _tuple - return contours, hierarchy - -# flask -# ================================================================= -os.environ['FLASK_DEBUG'] = 'development' -app = Flask(__name__) - -log = logging.getLogger('werkzeug') -log.setLevel(logging.ERROR) - -@app.route('/') -def index(): - """Video streaming home page.""" - return render_template('index.html') - -def get_frame(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_qrcode_pictrue(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_png_frame(): - return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() - -def get_qrcode(): - while Vilib.qrcode_img_encode is None: - time.sleep(0.2) - - return Vilib.qrcode_img_encode - -def gen(): - """Video streaming generator function.""" - while True: - # start_time = time.time() - frame = get_frame() - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') - time.sleep(0.03) - # end_time = time.time() - start_time - # print('flask fps:%s'%int(1/end_time)) - -@app.route('/mjpg') ## video -def video_feed(): - # from camera import Camera - """Video streaming route. 
Put this in the src attribute of an img tag.""" - if Vilib.web_display_flag: - response = Response(gen(), - mimetype='multipart/x-mixed-replace; boundary=frame') - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -@app.route('/mjpg.jpg') # jpg -def video_feed_jpg(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_frame(), mimetype="image/jpeg") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route('/mjpg.png') # png -def video_feed_png(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_png_frame(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route("/qrcode") -def qrcode_feed(): - qrcode_html = ''' - - - - QRcode - - - - QR Code - - -''' - return Response(qrcode_html, mimetype='text/html') - - -@app.route("/qrcode.png") -def qrcode_feed_png(): - """Video streaming route. Put this in the src attribute of an img tag.""" - if Vilib.web_qrcode_flag: - # response = Response(get_qrcode(), - # mimetype='multipart/x-mixed-replace; boundary=frame') - response = Response(get_qrcode(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display_qrcode(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -def web_camera_start(): - try: - Vilib.flask_start = True - app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) - except Exception as e: - print(e) - -# Vilib -# ================================================================= -class Vilib(object): - - picam2 = Picamera2() - - camera_size = (640, 480) - camera_width = 640 - camera_height = 480 - camera_vflip = False - camera_hflip = False - camera_run = False - - flask_thread = None - camera_thread = None - flask_start = False - - qrcode_display_thread = None - qrcode_making_completed = False - qrcode_img = Manager().list(range(1)) - qrcode_img_encode = None - qrcode_win_name = 'qrcode' - - img = Manager().list(range(1)) - flask_img = Manager().list(range(1)) - - Windows_Name = "picamera" - imshow_flag = False - web_display_flag = False - imshow_qrcode_flag = False - web_qrcode_flag = False - - draw_fps = False - fps_origin = (camera_width-105, 20) - fps_size = 0.6 - fps_color = (255, 255, 255) - - detect_obj_parameter = {} - color_detect_color = None - face_detect_sw = False - hands_detect_sw = False - pose_detect_sw = False - image_classify_sw = False - image_classification_model = None - image_classification_labels = None - objects_detect_sw = False - objects_detection_model = None - objects_detection_labels = None - qrcode_detect_sw = False - traffic_detect_sw = False - - @staticmethod - def get_instance(): - return Vilib.picam2 - - @staticmethod - def set_controls(controls): - Vilib.picam2.set_controls(controls) - - @staticmethod - def get_controls(): - return Vilib.picam2.capture_metadata() - - @staticmethod - def camera(): - Vilib.camera_width = Vilib.camera_size[0] - Vilib.camera_height = Vilib.camera_size[1] - - picam2 = Vilib.picam2 - - preview_config = picam2.preview_configuration - # preview_config.size = (800, 600) - preview_config.size = Vilib.camera_size - preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 
'YUV420' - preview_config.transform = libcamera.Transform( - hflip=Vilib.camera_hflip, - vflip=Vilib.camera_vflip - ) - preview_config.colour_space = libcamera.ColorSpace.Sycc() - preview_config.buffer_count = 4 - preview_config.queue = True - # preview_config.raw = {'size': (2304, 1296)} - preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time - - try: - picam2.start() - except Exception as e: - print(f"\033[38;5;1mError:\033[0m\n{e}") - print("\nPlease check whether the camera is connected well" +\ - "You can use the \"libcamea-hello\" command to test the camera" - ) - exit(1) - Vilib.camera_run = True - Vilib.fps_origin = (Vilib.camera_width-105, 20) - fps = 0 - start_time = 0 - framecount = 0 - try: - start_time = time.time() - while True: - # ----------- extract image data ---------------- - # st = time.time() - Vilib.img = picam2.capture_array() - # print(f'picam2.capture_array(): {time.time() - st:.6f}') - # st = time.time() - - # ----------- image gains and effects ---------------- - - # ----------- image detection and recognition ---------------- - Vilib.img = Vilib.color_detect_func(Vilib.img) - Vilib.img = Vilib.face_detect_func(Vilib.img) - Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) - Vilib.img = Vilib.qrcode_detect_func(Vilib.img) - - Vilib.img = Vilib.image_classify_fuc(Vilib.img) - Vilib.img = Vilib.object_detect_fuc(Vilib.img) - Vilib.img = Vilib.hands_detect_fuc(Vilib.img) - Vilib.img = Vilib.pose_detect_fuc(Vilib.img) - - # ----------- calculate fps and draw fps ---------------- - # calculate fps - framecount += 1 - elapsed_time = float(time.time() - start_time) - if (elapsed_time > 1): - fps = round(framecount/elapsed_time, 1) - framecount = 0 - start_time = time.time() - - # print(f"elapsed_time: {elapsed_time}, fps: {fps}") - - # draw fps - if Vilib.draw_fps: - cv2.putText( - # img, # image - Vilib.img, - f"FPS: {fps}", # text - Vilib.fps_origin, # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - Vilib.fps_size, 
# font_scale - Vilib.fps_color, # font_color - 1, # thickness - cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - - # ---- copy img for flask --- - # st = time.time() - Vilib.flask_img = Vilib.img - # print(f'vilib.flask_img: {time.time() - st:.6f}') - - # ----------- display on desktop ---------------- - if Vilib.imshow_flag == True: - try: - try: - prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) - qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) - if prop < 1 or qrcode_prop < 1: - break - except: - pass - - cv2.imshow(Vilib.Windows_Name, Vilib.img) - - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: - Vilib.qrcode_making_completed = False - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - - cv2.waitKey(1) - - except Exception as e: - Vilib.imshow_flag = False - print(f"imshow failed:\n {e}") - break - - # ----------- exit ---------------- - if Vilib.camera_run == False: - break - - # print(f'loop end: {time.time() - st:.6f}') - - except KeyboardInterrupt as e: - print(e) - finally: - picam2.close() - cv2.destroyAllWindows() - - @staticmethod - def camera_start(vflip=False, hflip=False, size=None): - if size is not None: - Vilib.camera_size = size - Vilib.camera_hflip = hflip - Vilib.camera_vflip = vflip - Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") - Vilib.camera_thread.daemon = False - Vilib.camera_thread.start() - while not Vilib.camera_run: - time.sleep(0.1) - - @staticmethod - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=2.0) - - # Properly close and reinitialize Picamera2 - try: - if Vilib.picam2 is not None: - Vilib.picam2.close() - time.sleep(0.1) - - # Recreate Picamera2 object completely fresh - Vilib.picam2 = Picamera2() - - # Create and configure a default preview 
configuration - preview_config = Vilib.picam2.create_preview_configuration() - Vilib.picam2.configure(preview_config) - - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object with basic setup - try: - Vilib.picam2 = Picamera2() - preview_config = Vilib.picam2.create_preview_configuration() - Vilib.picam2.configure(preview_config) - except Exception as e2: - print(f"Failed to reinitialize camera: {e2}") - - # Reset thread reference - Vilib.camera_thread = None - @staticmethod - def display(local=True, web=True): - # cheack camera thread is_alive - if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_flag = True - print("Imgshow start ...") - else: - Vilib.imshow_flag = False - print("Local display failed, because there is no gui.") - # web video - if web == True: - Vilib.web_display_flag = True - print("\nWeb display on:") - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/mjpg") - if eth0 != None: - print(f" http://{eth0}:9000/mjpg") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - else: - print('Error: Please execute < camera_start() > first.') - - @staticmethod - def show_fps(color=None, fps_size=None, fps_origin=None): - if color is not None: - Vilib.fps_color = color - if fps_size is not None: - Vilib.fps_size = fps_size - if fps_origin is not None: - Vilib.fps_origin = fps_origin - - Vilib.draw_fps = True - - @staticmethod - def hide_fps(): - Vilib.draw_fps = False - - # take photo - # ================================================================= - @staticmethod - def take_photo(photo_name, 
path=DEFAULLT_PICTURES_PATH): - # ----- check path ----- - if not os.path.exists(path): - # print('Path does not exist. Creating path now ... ') - os.makedirs(name=path, mode=0o751, exist_ok=True) - time.sleep(0.01) - # ----- save photo ----- - status = False - for _ in range(5): - if Vilib.img is not None: - status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) - break - else: - time.sleep(0.01) - else: - status = False - - # if status: - # print('The photo is saved as '+path+'/'+photo_name+'.jpg') - # else: - # print('Photo save failed .. ') - - return status - - - # record video - # ================================================================= - rec_video_set = {} - - rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') - #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") - - rec_video_set["fps"] = 30.0 - rec_video_set["framesize"] = (640, 480) - rec_video_set["isColor"] = True - - rec_video_set["name"] = "default" - rec_video_set["path"] = DEFAULLT_VIDEOS_PATH - - rec_video_set["start_flag"] = False - rec_video_set["stop_flag"] = False - - rec_thread = None - - @staticmethod - def rec_video_work(): - if not os.path.exists(Vilib.rec_video_set["path"]): - # print('Path does not exist. Creating path now ... 
') - os.makedirs(name=Vilib.rec_video_set["path"], - mode=0o751, - exist_ok=True - ) - time.sleep(0.01) - video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', - Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], - Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) - - while True: - if Vilib.rec_video_set["start_flag"] == True: - # video_out.write(Vilib.img_array[0]) - video_out.write(Vilib.img) - if Vilib.rec_video_set["stop_flag"] == True: - video_out.release() # note need to release the video writer - Vilib.rec_video_set["start_flag"] == False - break - - @staticmethod - def rec_video_run(): - if Vilib.rec_thread != None: - Vilib.rec_video_stop() - Vilib.rec_video_set["stop_flag"] = False - Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) - Vilib.rec_thread.daemon = True - Vilib.rec_thread.start() - - @staticmethod - def rec_video_start(): - Vilib.rec_video_set["start_flag"] = True - Vilib.rec_video_set["stop_flag"] = False - - @staticmethod - def rec_video_pause(): - Vilib.rec_video_set["start_flag"] = False - - @staticmethod - def rec_video_stop(): - Vilib.rec_video_set["start_flag"] == False - Vilib.rec_video_set["stop_flag"] = True - if Vilib.rec_thread != None: - Vilib.rec_thread.join(3) - Vilib.rec_thread = None - - # color detection - # ================================================================= - @staticmethod - def color_detect(color="red"): - ''' - :param color: could be red, green, blue, yellow , orange, purple - ''' - Vilib.color_detect_color = color - from .color_detection import color_detect_work, color_obj_parameter - Vilib.color_detect_work = color_detect_work - Vilib.color_obj_parameter = color_obj_parameter - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - 
Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - - @staticmethod - def color_detect_func(img): - if Vilib.color_detect_color is not None \ - and Vilib.color_detect_color != 'close' \ - and hasattr(Vilib, "color_detect_work"): - img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - return img - - @staticmethod - def close_color_detection(): - Vilib.color_detect_color = None - - # face detection - # ================================================================= - @staticmethod - def face_detect_switch(flag=False): - Vilib.face_detect_sw = flag - if Vilib.face_detect_sw: - from .face_detection import face_detect, set_face_detection_model, face_obj_parameter - Vilib.face_detect_work = face_detect - Vilib.set_face_detection_model = set_face_detection_model - Vilib.face_obj_parameter = face_obj_parameter - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - - @staticmethod - def face_detect_func(img): - if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): - img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = 
Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - return img - - # traffic sign detection - # ================================================================= - @staticmethod - def traffic_detect_switch(flag=False): - Vilib.traffic_detect_sw = flag - if Vilib.traffic_detect_sw: - from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter - Vilib.traffic_detect_work = traffic_sign_detect - Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - - @staticmethod - def traffic_detect_fuc(img): - if Vilib.traffic_detect_sw and hasattr(Vilib, "traffic_detect_work"): - img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - return img - - # qrcode recognition - # 
================================================================= - @staticmethod - def qrcode_detect_switch(flag=False): - Vilib.qrcode_detect_sw = flag - if Vilib.qrcode_detect_sw: - from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter - Vilib.qrcode_recognize = qrcode_recognize - Vilib.qrcode_obj_parameter = qrcode_obj_parameter - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] - - @staticmethod - def qrcode_detect_func(img): - if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): - img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - return img - - # qrcode making - # ================================================================= - @staticmethod - def make_qrcode(data, - path=None, - version=1, - box_size=10, - border=4, - fill_color=(132, 112, 255), - back_color=(255, 255, 255) - ): - import qrcode # https://github.com/lincolnloop/python-qrcode - - qr = qrcode.QRCode( - version=version, - error_correction=qrcode.constants.ERROR_CORRECT_L, - box_size=box_size, - border=border, - ) - qr.add_data(data) - qr.make(fit=True) - qr_pil = qr.make_image(fill_color=fill_color, - back_color=back_color) - if path != None: - qr_pil.save(path) - - Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), 
cv2.COLOR_RGB2BGR) - Vilib.qrcode_making_completed = True - - if Vilib.web_qrcode_flag: - Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() - - - - @staticmethod - def display_qrcode_work(): - while True: - if Vilib.imshow_flag: - time.sleep(0.1) - continue - - # ----------- display qrcode on desktop ---------------- - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : - Vilib.qrcode_making_completed = False - try: - if len(Vilib.qrcode_img) > 10: - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - cv2.waitKey(1) - if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: - cv2.destroyWindow(Vilib.qrcode_win_name) - except Exception as e: - Vilib.imshow_qrcode_flag = False - print(f"imshow qrcode failed:\n {e}") - break - time.sleep(0.1) - - @staticmethod - def display_qrcode(local=True, web=True): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_qrcode_flag = True - print("Imgshow qrcode start ...") - else: - Vilib.imshow_qrcode_flag = False - print("Local display failed, because there is no gui.") - # web video - if web == True: - Vilib.web_qrcode_flag = True - print(f'QRcode display on:') - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/qrcode") - if eth0 != None: - print(f" http://{eth0}:9000/qrcode") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - - if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: - Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) - Vilib.qrcode_display_thread.daemon = True - Vilib.qrcode_display_thread.start() - - - # image classification - # 
================================================================= - @staticmethod - def image_classify_switch(flag=False): - from .image_classification import image_classification_obj_parameter - Vilib.image_classify_sw = flag - Vilib.image_classification_obj_parameter = image_classification_obj_parameter - - @staticmethod - def image_classify_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.image_classification_model = path - - @staticmethod - def image_classify_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.image_classification_labels = path - - @staticmethod - def image_classify_fuc(img): - if Vilib.image_classify_sw == True: - # print('classify_image starting') - from .image_classification import classify_image - img = classify_image(image=img, - model=Vilib.image_classification_model, - labels=Vilib.image_classification_labels) - return img - - # objects detection - # ================================================================= - @staticmethod - def object_detect_switch(flag=False): - Vilib.objects_detect_sw = flag - if Vilib.objects_detect_sw == True: - from .objects_detection import object_detection_list_parameter - Vilib.object_detection_list_parameter = object_detection_list_parameter - - @staticmethod - def object_detect_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.objects_detection_model = path - - @staticmethod - def object_detect_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.objects_detection_labels = path - - @staticmethod - def object_detect_fuc(img): - if Vilib.objects_detect_sw == True: - # print('detect_objects starting') - from .objects_detection import detect_objects - img = detect_objects(image=img, - model=Vilib.objects_detection_model, - labels=Vilib.objects_detection_labels) - return img - - # hands detection - # 
================================================================= - @staticmethod - def hands_detect_switch(flag=False): - from .hands_detection import DetectHands - Vilib.detect_hands = DetectHands() - Vilib.hands_detect_sw = flag - - @staticmethod - def hands_detect_fuc(img): - if Vilib.hands_detect_sw == True: - img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) - return img - - # pose detection - # ================================================================= - @staticmethod - def pose_detect_switch(flag=False): - from .pose_detection import DetectPose - Vilib.pose_detect = DetectPose() - Vilib.pose_detect_sw = flag - - @staticmethod - def pose_detect_fuc(img): - if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): - img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) - return img From c0cca768a14e175f1a58176de9670d47e8712478 Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 7 Nov 2025 18:19:02 +0100 Subject: [PATCH 4/5] Cleanup --- =3.20.0 => =3.21.0 | 0 vilib/camera_close_fix.txt | 21 - vilib/camera_close_fix_final.txt | 33 -- vilib/camera_close_fix_v2.txt | 30 -- vilib/vilib.py.backup | 818 ------------------------------- 5 files changed, 902 deletions(-) rename =3.20.0 => =3.21.0 (100%) delete mode 100644 vilib/camera_close_fix.txt delete mode 100644 vilib/camera_close_fix_final.txt delete mode 100644 vilib/camera_close_fix_v2.txt delete mode 100644 vilib/vilib.py.backup diff --git a/=3.20.0 b/=3.21.0 similarity index 100% rename from =3.20.0 rename to =3.21.0 diff --git a/vilib/camera_close_fix.txt b/vilib/camera_close_fix.txt deleted file mode 100644 index 1056e3c..0000000 --- a/vilib/camera_close_fix.txt +++ /dev/null @@ -1,21 +0,0 @@ - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=2.0) - - # Properly close and 
reinitialize Picamera2 - try: - if Vilib.picam2 is not None: - Vilib.picam2.close() - time.sleep(0.1) - Vilib.picam2 = Picamera2() - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object - Vilib.picam2 = Picamera2() - - # Reset thread reference - Vilib.camera_thread = None diff --git a/vilib/camera_close_fix_final.txt b/vilib/camera_close_fix_final.txt deleted file mode 100644 index 4d30db2..0000000 --- a/vilib/camera_close_fix_final.txt +++ /dev/null @@ -1,33 +0,0 @@ - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=2.0) - - # Properly close and reinitialize Picamera2 - try: - if Vilib.picam2 is not None: - Vilib.picam2.close() - time.sleep(0.1) - - # Recreate Picamera2 object completely fresh - Vilib.picam2 = Picamera2() - - # Create and configure a default preview configuration - preview_config = Vilib.picam2.create_preview_configuration() - Vilib.picam2.configure(preview_config) - - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object with basic setup - try: - Vilib.picam2 = Picamera2() - preview_config = Vilib.picam2.create_preview_configuration() - Vilib.picam2.configure(preview_config) - except Exception as e2: - print(f"Failed to reinitialize camera: {e2}") - - # Reset thread reference - Vilib.camera_thread = None diff --git a/vilib/camera_close_fix_v2.txt b/vilib/camera_close_fix_v2.txt deleted file mode 100644 index 8d5abfa..0000000 --- a/vilib/camera_close_fix_v2.txt +++ /dev/null @@ -1,30 +0,0 @@ - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1) - # Wait for camera thread to finish - if Vilib.camera_thread.is_alive(): - Vilib.camera_thread.join(timeout=2.0) - - # Properly close and reinitialize Picamera2 - try: - if Vilib.picam2 is 
not None: - Vilib.picam2.close() - time.sleep(0.1) - # Recreate Picamera2 with fresh configuration - Vilib.picam2 = Picamera2() - # Important: Create the configuration objects immediately - _ = Vilib.picam2.preview_configuration - _ = Vilib.picam2.still_configuration - except Exception as e: - print(f"Warning during camera cleanup: {e}") - # Force recreation of Picamera2 object - try: - Vilib.picam2 = Picamera2() - _ = Vilib.picam2.preview_configuration - _ = Vilib.picam2.still_configuration - except: - pass - - # Reset thread reference - Vilib.camera_thread = None diff --git a/vilib/vilib.py.backup b/vilib/vilib.py.backup deleted file mode 100644 index 5144f10..0000000 --- a/vilib/vilib.py.backup +++ /dev/null @@ -1,818 +0,0 @@ -#!/usr/bin/env python3 - -# whther print welcome message -import os -import logging - -from .version import __version__ -if 'VILIB_WELCOME' not in os.environ or os.environ['VILIB_WELCOME'] not in [ - 'False', '0' -]: - from pkg_resources import require - picamera2_version = require('picamera2')[0].version - print(f'vilib {__version__} launching ...') - print(f'picamera2 {picamera2_version}') - -# set libcamera2 log level -os.environ['LIBCAMERA_LOG_LEVELS'] = '*:ERROR' -from picamera2 import Picamera2 -import libcamera - -import cv2 -import numpy as np -from PIL import Image, ImageDraw, ImageFont - -from flask import Flask, render_template, Response - -import time -import datetime -import threading -from multiprocessing import Process, Manager - -from .utils import * - -# user and user home directory -# ================================================================= -user = os.popen("echo ${SUDO_USER:-$(who -m | awk '{ print $1 }')}").readline().strip() -user_home = os.popen(f'getent passwd {user} | cut -d: -f 6').readline().strip() -# print(f"user: {user}") -# print(f"user_home: {user_home}") - -# Default path for pictures and videos -DEFAULLT_PICTURES_PATH = '%s/Pictures/vilib/'%user_home -DEFAULLT_VIDEOS_PATH = 
'%s/Videos/vilib/'%user_home - -# utils -# ================================================================= -def findContours(img): - _tuple = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - # compatible with opencv3.x and openc4.x - if len(_tuple) == 3: - _, contours, hierarchy = _tuple - else: - contours, hierarchy = _tuple - return contours, hierarchy - -# flask -# ================================================================= -os.environ['FLASK_DEBUG'] = 'development' -app = Flask(__name__) - -log = logging.getLogger('werkzeug') -log.setLevel(logging.ERROR) - -@app.route('/') -def index(): - """Video streaming home page.""" - return render_template('index.html') - -def get_frame(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_qrcode_pictrue(): - return cv2.imencode('.jpg', Vilib.flask_img)[1].tobytes() - -def get_png_frame(): - return cv2.imencode('.png', Vilib.flask_img)[1].tobytes() - -def get_qrcode(): - while Vilib.qrcode_img_encode is None: - time.sleep(0.2) - - return Vilib.qrcode_img_encode - -def gen(): - """Video streaming generator function.""" - while True: - # start_time = time.time() - frame = get_frame() - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') - time.sleep(0.03) - # end_time = time.time() - start_time - # print('flask fps:%s'%int(1/end_time)) - -@app.route('/mjpg') ## video -def video_feed(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - if Vilib.web_display_flag: - response = Response(gen(), - mimetype='multipart/x-mixed-replace; boundary=frame') - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -@app.route('/mjpg.jpg') # jpg -def video_feed_jpg(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_frame(), mimetype="image/jpeg") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route('/mjpg.png') # png -def video_feed_png(): - # from camera import Camera - """Video streaming route. Put this in the src attribute of an img tag.""" - response = Response(get_png_frame(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - -@app.route("/qrcode") -def qrcode_feed(): - qrcode_html = ''' - - - - QRcode - - - - QR Code - - -''' - return Response(qrcode_html, mimetype='text/html') - - -@app.route("/qrcode.png") -def qrcode_feed_png(): - """Video streaming route. Put this in the src attribute of an img tag.""" - if Vilib.web_qrcode_flag: - # response = Response(get_qrcode(), - # mimetype='multipart/x-mixed-replace; boundary=frame') - response = Response(get_qrcode(), mimetype="image/png") - response.headers.add("Access-Control-Allow-Origin", "*") - return response - else: - tip = ''' - Please enable web display first: - Vilib.display_qrcode(web=True) -''' - html = f"

{tip}

" - return Response(html, mimetype='text/html') - -def web_camera_start(): - try: - Vilib.flask_start = True - app.run(host='0.0.0.0', port=9000, threaded=True, debug=False) - except Exception as e: - print(e) - -# Vilib -# ================================================================= -class Vilib(object): - - picam2 = Picamera2() - - camera_size = (640, 480) - camera_width = 640 - camera_height = 480 - camera_vflip = False - camera_hflip = False - camera_run = False - - flask_thread = None - camera_thread = None - flask_start = False - - qrcode_display_thread = None - qrcode_making_completed = False - qrcode_img = Manager().list(range(1)) - qrcode_img_encode = None - qrcode_win_name = 'qrcode' - - img = Manager().list(range(1)) - flask_img = Manager().list(range(1)) - - Windows_Name = "picamera" - imshow_flag = False - web_display_flag = False - imshow_qrcode_flag = False - web_qrcode_flag = False - - draw_fps = False - fps_origin = (camera_width-105, 20) - fps_size = 0.6 - fps_color = (255, 255, 255) - - detect_obj_parameter = {} - color_detect_color = None - face_detect_sw = False - hands_detect_sw = False - pose_detect_sw = False - image_classify_sw = False - image_classification_model = None - image_classification_labels = None - objects_detect_sw = False - objects_detection_model = None - objects_detection_labels = None - qrcode_detect_sw = False - traffic_detect_sw = False - - @staticmethod - def get_instance(): - return Vilib.picam2 - - @staticmethod - def set_controls(controls): - Vilib.picam2.set_controls(controls) - - @staticmethod - def get_controls(): - return Vilib.picam2.capture_metadata() - - @staticmethod - def camera(): - Vilib.camera_width = Vilib.camera_size[0] - Vilib.camera_height = Vilib.camera_size[1] - - picam2 = Vilib.picam2 - - preview_config = picam2.preview_configuration - # preview_config.size = (800, 600) - preview_config.size = Vilib.camera_size - preview_config.format = 'RGB888' # 'XRGB8888', 'XBGR8888', 'RGB888', 'BGR888', 
'YUV420' - preview_config.transform = libcamera.Transform( - hflip=Vilib.camera_hflip, - vflip=Vilib.camera_vflip - ) - preview_config.colour_space = libcamera.ColorSpace.Sycc() - preview_config.buffer_count = 4 - preview_config.queue = True - # preview_config.raw = {'size': (2304, 1296)} - preview_config.controls = {'FrameRate': 60} # change picam2.capture_array() takes time - - try: - picam2.start() - except Exception as e: - print(f"\033[38;5;1mError:\033[0m\n{e}") - print("\nPlease check whether the camera is connected well" +\ - "You can use the \"libcamea-hello\" command to test the camera" - ) - exit(1) - Vilib.camera_run = True - Vilib.fps_origin = (Vilib.camera_width-105, 20) - fps = 0 - start_time = 0 - framecount = 0 - try: - start_time = time.time() - while True: - # ----------- extract image data ---------------- - # st = time.time() - Vilib.img = picam2.capture_array() - # print(f'picam2.capture_array(): {time.time() - st:.6f}') - # st = time.time() - - # ----------- image gains and effects ---------------- - - # ----------- image detection and recognition ---------------- - Vilib.img = Vilib.color_detect_func(Vilib.img) - Vilib.img = Vilib.face_detect_func(Vilib.img) - Vilib.img = Vilib.traffic_detect_fuc(Vilib.img) - Vilib.img = Vilib.qrcode_detect_func(Vilib.img) - - Vilib.img = Vilib.image_classify_fuc(Vilib.img) - Vilib.img = Vilib.object_detect_fuc(Vilib.img) - Vilib.img = Vilib.hands_detect_fuc(Vilib.img) - Vilib.img = Vilib.pose_detect_fuc(Vilib.img) - - # ----------- calculate fps and draw fps ---------------- - # calculate fps - framecount += 1 - elapsed_time = float(time.time() - start_time) - if (elapsed_time > 1): - fps = round(framecount/elapsed_time, 1) - framecount = 0 - start_time = time.time() - - # print(f"elapsed_time: {elapsed_time}, fps: {fps}") - - # draw fps - if Vilib.draw_fps: - cv2.putText( - # img, # image - Vilib.img, - f"FPS: {fps}", # text - Vilib.fps_origin, # origin - cv2.FONT_HERSHEY_SIMPLEX, # font - Vilib.fps_size, 
# font_scale - Vilib.fps_color, # font_color - 1, # thickness - cv2.LINE_AA, # line_type: LINE_8 (default), LINE_4, LINE_AA - ) - - # ---- copy img for flask --- - # st = time.time() - Vilib.flask_img = Vilib.img - # print(f'vilib.flask_img: {time.time() - st:.6f}') - - # ----------- display on desktop ---------------- - if Vilib.imshow_flag == True: - try: - try: - prop = cv2.getWindowProperty(Vilib.Windows_Name, cv2.WND_PROP_VISIBLE) - qrcode_prop = cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) - if prop < 1 or qrcode_prop < 1: - break - except: - pass - - cv2.imshow(Vilib.Windows_Name, Vilib.img) - - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed: - Vilib.qrcode_making_completed = False - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - - cv2.waitKey(1) - - except Exception as e: - Vilib.imshow_flag = False - print(f"imshow failed:\n {e}") - break - - # ----------- exit ---------------- - if Vilib.camera_run == False: - break - - # print(f'loop end: {time.time() - st:.6f}') - - except KeyboardInterrupt as e: - print(e) - finally: - picam2.close() - cv2.destroyAllWindows() - - @staticmethod - def camera_start(vflip=False, hflip=False, size=None): - if size is not None: - Vilib.camera_size = size - Vilib.camera_hflip = hflip - Vilib.camera_vflip = vflip - Vilib.camera_thread = threading.Thread(target=Vilib.camera, name="vilib") - Vilib.camera_thread.daemon = False - Vilib.camera_thread.start() - while not Vilib.camera_run: - time.sleep(0.1) - - @staticmethod - def camera_close(): - if Vilib.camera_thread != None: - Vilib.camera_run = False - time.sleep(0.1) - - @staticmethod - def display(local=True, web=True): - # cheack camera thread is_alive - if Vilib.camera_thread != None and Vilib.camera_thread.is_alive(): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_flag = True - print("Imgshow start ...") - else: - Vilib.imshow_flag = False - print("Local display failed, because there is no 
gui.") - # web video - if web == True: - Vilib.web_display_flag = True - print("\nWeb display on:") - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/mjpg") - if eth0 != None: - print(f" http://{eth0}:9000/mjpg") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - else: - print('Error: Please execute < camera_start() > first.') - - @staticmethod - def show_fps(color=None, fps_size=None, fps_origin=None): - if color is not None: - Vilib.fps_color = color - if fps_size is not None: - Vilib.fps_size = fps_size - if fps_origin is not None: - Vilib.fps_origin = fps_origin - - Vilib.draw_fps = True - - @staticmethod - def hide_fps(): - Vilib.draw_fps = False - - # take photo - # ================================================================= - @staticmethod - def take_photo(photo_name, path=DEFAULLT_PICTURES_PATH): - # ----- check path ----- - if not os.path.exists(path): - # print('Path does not exist. Creating path now ... ') - os.makedirs(name=path, mode=0o751, exist_ok=True) - time.sleep(0.01) - # ----- save photo ----- - status = False - for _ in range(5): - if Vilib.img is not None: - status = cv2.imwrite(path + '/' + photo_name +'.jpg', Vilib.img) - break - else: - time.sleep(0.01) - else: - status = False - - # if status: - # print('The photo is saved as '+path+'/'+photo_name+'.jpg') - # else: - # print('Photo save failed .. 
') - - return status - - - # record video - # ================================================================= - rec_video_set = {} - - rec_video_set["fourcc"] = cv2.VideoWriter_fourcc(*'XVID') - #rec_video_set["fourcc"] = cv2.cv.CV_FOURCC("D", "I", "B", " ") - - rec_video_set["fps"] = 30.0 - rec_video_set["framesize"] = (640, 480) - rec_video_set["isColor"] = True - - rec_video_set["name"] = "default" - rec_video_set["path"] = DEFAULLT_VIDEOS_PATH - - rec_video_set["start_flag"] = False - rec_video_set["stop_flag"] = False - - rec_thread = None - - @staticmethod - def rec_video_work(): - if not os.path.exists(Vilib.rec_video_set["path"]): - # print('Path does not exist. Creating path now ... ') - os.makedirs(name=Vilib.rec_video_set["path"], - mode=0o751, - exist_ok=True - ) - time.sleep(0.01) - video_out = cv2.VideoWriter(Vilib.rec_video_set["path"]+'/'+Vilib.rec_video_set["name"]+'.avi', - Vilib.rec_video_set["fourcc"], Vilib.rec_video_set["fps"], - Vilib.rec_video_set["framesize"], Vilib.rec_video_set["isColor"]) - - while True: - if Vilib.rec_video_set["start_flag"] == True: - # video_out.write(Vilib.img_array[0]) - video_out.write(Vilib.img) - if Vilib.rec_video_set["stop_flag"] == True: - video_out.release() # note need to release the video writer - Vilib.rec_video_set["start_flag"] == False - break - - @staticmethod - def rec_video_run(): - if Vilib.rec_thread != None: - Vilib.rec_video_stop() - Vilib.rec_video_set["stop_flag"] = False - Vilib.rec_thread = threading.Thread(name='rec_video', target=Vilib.rec_video_work) - Vilib.rec_thread.daemon = True - Vilib.rec_thread.start() - - @staticmethod - def rec_video_start(): - Vilib.rec_video_set["start_flag"] = True - Vilib.rec_video_set["stop_flag"] = False - - @staticmethod - def rec_video_pause(): - Vilib.rec_video_set["start_flag"] = False - - @staticmethod - def rec_video_stop(): - Vilib.rec_video_set["start_flag"] == False - Vilib.rec_video_set["stop_flag"] = True - if Vilib.rec_thread != None: - 
Vilib.rec_thread.join(3) - Vilib.rec_thread = None - - # color detection - # ================================================================= - @staticmethod - def color_detect(color="red"): - ''' - :param color: could be red, green, blue, yellow , orange, purple - ''' - Vilib.color_detect_color = color - from .color_detection import color_detect_work, color_obj_parameter - Vilib.color_detect_work = color_detect_work - Vilib.color_obj_parameter = color_obj_parameter - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - - @staticmethod - def color_detect_func(img): - if Vilib.color_detect_color is not None \ - and Vilib.color_detect_color != 'close' \ - and hasattr(Vilib, "color_detect_work"): - img = Vilib.color_detect_work(img, Vilib.camera_width, Vilib.camera_height, Vilib.color_detect_color) - Vilib.detect_obj_parameter['color_x'] = Vilib.color_obj_parameter['x'] - Vilib.detect_obj_parameter['color_y'] = Vilib.color_obj_parameter['y'] - Vilib.detect_obj_parameter['color_w'] = Vilib.color_obj_parameter['w'] - Vilib.detect_obj_parameter['color_h'] = Vilib.color_obj_parameter['h'] - Vilib.detect_obj_parameter['color_n'] = Vilib.color_obj_parameter['n'] - return img - - @staticmethod - def close_color_detection(): - Vilib.color_detect_color = None - - # face detection - # ================================================================= - @staticmethod - def face_detect_switch(flag=False): - Vilib.face_detect_sw = flag - if Vilib.face_detect_sw: - from .face_detection import face_detect, set_face_detection_model, face_obj_parameter - Vilib.face_detect_work = face_detect - Vilib.set_face_detection_model = set_face_detection_model - Vilib.face_obj_parameter = 
face_obj_parameter - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - - @staticmethod - def face_detect_func(img): - if Vilib.face_detect_sw and hasattr(Vilib, "face_detect_work"): - img = Vilib.face_detect_work(img, Vilib.camera_width, Vilib.camera_height) - Vilib.detect_obj_parameter['human_x'] = Vilib.face_obj_parameter['x'] - Vilib.detect_obj_parameter['human_y'] = Vilib.face_obj_parameter['y'] - Vilib.detect_obj_parameter['human_w'] = Vilib.face_obj_parameter['w'] - Vilib.detect_obj_parameter['human_h'] = Vilib.face_obj_parameter['h'] - Vilib.detect_obj_parameter['human_n'] = Vilib.face_obj_parameter['n'] - return img - - # traffic sign detection - # ================================================================= - @staticmethod - def traffic_detect_switch(flag=False): - Vilib.traffic_detect_sw = flag - if Vilib.traffic_detect_sw: - from .traffic_sign_detection import traffic_sign_detect, traffic_sign_obj_parameter - Vilib.traffic_detect_work = traffic_sign_detect - Vilib.traffic_sign_obj_parameter = traffic_sign_obj_parameter - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - - @staticmethod - def traffic_detect_fuc(img): - if Vilib.traffic_detect_sw and hasattr(Vilib, 
"traffic_detect_work"): - img = Vilib.traffic_detect_work(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['traffic_sign_x'] = Vilib.traffic_sign_obj_parameter['x'] - Vilib.detect_obj_parameter['traffic_sign_y'] = Vilib.traffic_sign_obj_parameter['y'] - Vilib.detect_obj_parameter['traffic_sign_w'] = Vilib.traffic_sign_obj_parameter['w'] - Vilib.detect_obj_parameter['traffic_sign_h'] = Vilib.traffic_sign_obj_parameter['h'] - Vilib.detect_obj_parameter['traffic_sign_t'] = Vilib.traffic_sign_obj_parameter['t'] - Vilib.detect_obj_parameter['traffic_sign_acc'] = Vilib.traffic_sign_obj_parameter['acc'] - return img - - # qrcode recognition - # ================================================================= - @staticmethod - def qrcode_detect_switch(flag=False): - Vilib.qrcode_detect_sw = flag - if Vilib.qrcode_detect_sw: - from .qrcode_recognition import qrcode_recognize, qrcode_obj_parameter - Vilib.qrcode_recognize = qrcode_recognize - Vilib.qrcode_obj_parameter = qrcode_obj_parameter - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - Vilib.detect_obj_parameter['qr_list'] = Vilib.qrcode_obj_parameter['list'] - - @staticmethod - def qrcode_detect_func(img): - if Vilib.qrcode_detect_sw and hasattr(Vilib, "qrcode_recognize"): - img = Vilib.qrcode_recognize(img, border_rgb=(255, 0, 0)) - Vilib.detect_obj_parameter['qr_x'] = Vilib.qrcode_obj_parameter['x'] - Vilib.detect_obj_parameter['qr_y'] = Vilib.qrcode_obj_parameter['y'] - Vilib.detect_obj_parameter['qr_w'] = Vilib.qrcode_obj_parameter['w'] - Vilib.detect_obj_parameter['qr_h'] = Vilib.qrcode_obj_parameter['h'] - Vilib.detect_obj_parameter['qr_data'] = Vilib.qrcode_obj_parameter['data'] - return img - - 
# qrcode making - # ================================================================= - @staticmethod - def make_qrcode(data, - path=None, - version=1, - box_size=10, - border=4, - fill_color=(132, 112, 255), - back_color=(255, 255, 255) - ): - import qrcode # https://github.com/lincolnloop/python-qrcode - - qr = qrcode.QRCode( - version=version, - error_correction=qrcode.constants.ERROR_CORRECT_L, - box_size=box_size, - border=border, - ) - qr.add_data(data) - qr.make(fit=True) - qr_pil = qr.make_image(fill_color=fill_color, - back_color=back_color) - if path != None: - qr_pil.save(path) - - Vilib.qrcode_img = cv2.cvtColor(np.array(qr_pil), cv2.COLOR_RGB2BGR) - Vilib.qrcode_making_completed = True - - if Vilib.web_qrcode_flag: - Vilib.qrcode_img_encode = cv2.imencode('.jpg', Vilib.qrcode_img)[1].tobytes() - - - - @staticmethod - def display_qrcode_work(): - while True: - if Vilib.imshow_flag: - time.sleep(0.1) - continue - - # ----------- display qrcode on desktop ---------------- - if Vilib.imshow_qrcode_flag and Vilib.qrcode_making_completed : - Vilib.qrcode_making_completed = False - try: - if len(Vilib.qrcode_img) > 10: - cv2.imshow(Vilib.qrcode_win_name, Vilib.qrcode_img) - cv2.waitKey(1) - if cv2.getWindowProperty(Vilib.qrcode_win_name, cv2.WND_PROP_VISIBLE) == 0: - cv2.destroyWindow(Vilib.qrcode_win_name) - except Exception as e: - Vilib.imshow_qrcode_flag = False - print(f"imshow qrcode failed:\n {e}") - break - time.sleep(0.1) - - @staticmethod - def display_qrcode(local=True, web=True): - # check gui - if local == True: - if 'DISPLAY' in os.environ.keys(): - Vilib.imshow_qrcode_flag = True - print("Imgshow qrcode start ...") - else: - Vilib.imshow_qrcode_flag = False - print("Local display failed, because there is no gui.") - # web video - if web == True: - Vilib.web_qrcode_flag = True - print(f'QRcode display on:') - wlan0, eth0 = getIP() - if wlan0 != None: - print(f" http://{wlan0}:9000/qrcode") - if eth0 != None: - print(f" 
http://{eth0}:9000/qrcode") - print() # new line - - # ----------- flask_thread ---------------- - if Vilib.flask_thread == None or Vilib.flask_thread.is_alive() == False: - print('Starting web streaming ...') - Vilib.flask_thread = threading.Thread(name='flask_thread',target=web_camera_start) - Vilib.flask_thread.daemon = True - Vilib.flask_thread.start() - - if Vilib.qrcode_display_thread == None or Vilib.qrcode_display_thread.is_alive() == False: - Vilib.qrcode_display_thread = threading.Thread(name='qrcode_display',target=Vilib.display_qrcode_work) - Vilib.qrcode_display_thread.daemon = True - Vilib.qrcode_display_thread.start() - - - # image classification - # ================================================================= - @staticmethod - def image_classify_switch(flag=False): - from .image_classification import image_classification_obj_parameter - Vilib.image_classify_sw = flag - Vilib.image_classification_obj_parameter = image_classification_obj_parameter - - @staticmethod - def image_classify_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.image_classification_model = path - - @staticmethod - def image_classify_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.image_classification_labels = path - - @staticmethod - def image_classify_fuc(img): - if Vilib.image_classify_sw == True: - # print('classify_image starting') - from .image_classification import classify_image - img = classify_image(image=img, - model=Vilib.image_classification_model, - labels=Vilib.image_classification_labels) - return img - - # objects detection - # ================================================================= - @staticmethod - def object_detect_switch(flag=False): - Vilib.objects_detect_sw = flag - if Vilib.objects_detect_sw == True: - from .objects_detection import object_detection_list_parameter - Vilib.object_detection_list_parameter = object_detection_list_parameter - 
- @staticmethod - def object_detect_set_model(path): - if not os.path.exists(path): - raise ValueError('incorrect model path ') - Vilib.objects_detection_model = path - - @staticmethod - def object_detect_set_labels(path): - if not os.path.exists(path): - raise ValueError('incorrect labels path ') - Vilib.objects_detection_labels = path - - @staticmethod - def object_detect_fuc(img): - if Vilib.objects_detect_sw == True: - # print('detect_objects starting') - from .objects_detection import detect_objects - img = detect_objects(image=img, - model=Vilib.objects_detection_model, - labels=Vilib.objects_detection_labels) - return img - - # hands detection - # ================================================================= - @staticmethod - def hands_detect_switch(flag=False): - from .hands_detection import DetectHands - Vilib.detect_hands = DetectHands() - Vilib.hands_detect_sw = flag - - @staticmethod - def hands_detect_fuc(img): - if Vilib.hands_detect_sw == True: - img, Vilib.detect_obj_parameter['hands_joints'] = Vilib.detect_hands.work(image=img) - return img - - # pose detection - # ================================================================= - @staticmethod - def pose_detect_switch(flag=False): - from .pose_detection import DetectPose - Vilib.pose_detect = DetectPose() - Vilib.pose_detect_sw = flag - - @staticmethod - def pose_detect_fuc(img): - if Vilib.pose_detect_sw == True and hasattr(Vilib, "pose_detect"): - img, Vilib.detect_obj_parameter['body_joints'] = Vilib.pose_detect.work(image=img) - return img From c2a44c387fd8ac8980918477562a306054c385da Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 7 Nov 2025 18:21:10 +0100 Subject: [PATCH 5/5] Cleanup --- vilib/comprehensive_fix.py | 84 -------------------------------------- 1 file changed, 84 deletions(-) delete mode 100644 vilib/comprehensive_fix.py diff --git a/vilib/comprehensive_fix.py b/vilib/comprehensive_fix.py deleted file mode 100644 index d23f0b3..0000000 --- a/vilib/comprehensive_fix.py +++ 
/dev/null
@@ -1,84 +0,0 @@
#!/usr/bin/env python3
"""
Comprehensive fix for vilib camera threading issues
This patches both camera_close and camera methods
"""

def apply_comprehensive_fix():
    # Load the target module source so the textual patches below can be
    # applied with str.replace().
    # NOTE(review): uses a relative path — assumes it is run from the
    # vilib/ package directory; TODO confirm.
    # Read the original file
    with open('vilib.py', 'r') as f:
        content = f.read()

    # 1. Fix camera_close method
    # NOTE(review): the indentation inside these template strings was lost
    # during extraction and has been reconstructed here; it must match
    # vilib.py byte-for-byte or str.replace() silently does nothing —
    # verify against the actual file before trusting this patcher.
    old_camera_close = '''    @staticmethod
    def camera_close():
        if Vilib.camera_thread != None:
            Vilib.camera_run = False
            time.sleep(0.1)'''

    new_camera_close = '''    @staticmethod
    def camera_close():
        if Vilib.camera_thread != None:
            Vilib.camera_run = False
            time.sleep(0.2)
            # Wait for camera thread to finish
            if Vilib.camera_thread.is_alive():
                Vilib.camera_thread.join(timeout=3.0)

        # Properly close and reinitialize Picamera2
        try:
            if Vilib.picam2 is not None:
                Vilib.picam2.close()
                time.sleep(0.2)

                # Recreate Picamera2 object completely fresh
                Vilib.picam2 = Picamera2()

        except Exception as e:
            print(f"Warning during camera cleanup: {e}")
            # Force recreation of Picamera2 object
            try:
                Vilib.picam2 = Picamera2()
            except Exception as e2:
                print(f"Failed to reinitialize camera: {e2}")

        # Reset thread reference
        Vilib.camera_thread = None'''

    # 2. Fix camera method to be more robust
    old_camera_start = '''        preview_config = picam2.preview_configuration
        # preview_config.size = (800, 600)
        preview_config.size = Vilib.camera_size'''

    new_camera_start = '''        # Ensure we have a fresh configuration
        try:
            preview_config = picam2.preview_configuration
            if preview_config is None:
                # Create new configuration if needed
                config = picam2.create_preview_configuration()
                picam2.configure(config)
                preview_config = picam2.preview_configuration
        except Exception as e:
            print(f"Error getting preview configuration: {e}")
            # Try to create a new configuration
            config = picam2.create_preview_configuration()
            picam2.configure(config)
            preview_config = picam2.preview_configuration

        # preview_config.size = (800, 600)
        if preview_config is not None:
            preview_config.size = Vilib.camera_size'''

    # Apply the fixes
    # NOTE(review): str.replace() gives no feedback when a pattern is not
    # found, so a mismatch above fails silently.
    content = content.replace(old_camera_close, new_camera_close)
    content = content.replace(old_camera_start, new_camera_start)

    # Write the fixed file
    with open('vilib.py', 'w') as f:
        f.write(content)

    print("Applied comprehensive vilib fix!")
    return True

if __name__ == "__main__":
    apply_comprehensive_fix()