-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathALPR_adapter.py
More file actions
111 lines (81 loc) · 3.6 KB
/
ALPR_adapter.py
File metadata and controls
111 lines (81 loc) · 3.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# Import our general libraries
import time
# Import CodeProject.AI SDK
from codeproject_ai_sdk import RequestData, ModuleRunner, JSON
# import sys
# sys.path.append("../../CodeProject.AI-Server/src/SDK/Python/src/codeproject_ai_sdk/")
# from common import JSON
# from request_data import RequestData
# from module_runner import ModuleRunner
# import modules we've installed as for this module to work
from PIL import Image
# Import the method of the module we're wrapping
from ALPR import init_detect_platenumber, detect_platenumber
# Import our helpers
from options import Options
class ALPR_adapter(ModuleRunner):
    """CodeProject.AI module adapter that exposes the ALPR licence-plate
    detector (ALPR.detect_platenumber) through the ModuleRunner interface."""

    def __init__(self):
        super().__init__()
        # Module options (GPU thresholds etc.) loaded from options.py
        self.opts = Options()

    def initialise(self) -> None:
        """One-time setup: decide whether the GPU can be used, then
        initialise the underlying plate-number detector."""
        self.can_use_GPU = self.system_info.hasPaddleGPU

        # HACK: We're seeing problems with GPU support on older cards. Allow
        # some checks to be done
        if self.system_info.hasPaddleGPU:
            import paddle
            # No visible CUDA device, or compute capability below the
            # configured minimum -> fall back to CPU.
            if not paddle.device.cuda.device_count() or \
               paddle.device.cuda.get_device_capability()[0] < self.opts.min_compute_capability:
                self.can_use_GPU = False
            # get_cudnn_version() returns e.g. 8100 for cuDNN 8.1, hence /100.0
            if paddle.device.get_cudnn_version() / 100.0 < self.opts.min_cuDNN_version:
                self.can_use_GPU = False
        # end hack

        self.opts.use_gpu = self.enable_GPU and self.can_use_GPU

        if self.opts.use_gpu:
            self.inference_device  = "GPU"
            self.inference_library = "CUDA" # PaddleOCR supports only CUDA enabled GPUs at this point

        init_detect_platenumber(self.opts)

        # Running total of plates found across all requests; surfaced by status()
        self._num_items_found = 0

    async def process(self, data: RequestData) -> JSON:
        """Run plate detection on the request's first image.

        Returns a JSON dict with "success", and on success "predictions",
        "processMs", "inferenceMs" and a human-readable "message"; on
        failure an "error" string.
        """
        try:
            # Annotation fixed: `Image` is the PIL module; the object returned
            # is an Image.Image instance.
            image: Image.Image = data.get_image(0)

            start_time = time.perf_counter()

            result = await detect_platenumber(self, self.opts, image)

            if "error" in result and result["error"]:
                response = { "success": False, "error": result["error"] }
                return response

            predictions = result["predictions"]
            if len(predictions) > 3:
                # BUGFIX: when truncating, list only the first three labels.
                # The original joined every label and then appended "...",
                # making this branch identical to the one below plus dots.
                message = 'Found ' + (', '.join(det["label"] for det in predictions[:3])) + "..."
            elif len(predictions) > 0:
                message = 'Found ' + (', '.join(det["label"] for det in predictions))
            else:
                message = "No plates found"

            response = {
                "success": True,
                "processMs" : int((time.perf_counter() - start_time) * 1000),
                "inferenceMs" : result["inferenceMs"],
                "predictions": predictions,
                "message": message
            }

        except Exception as ex:
            # Best-effort error reporting; the caller always gets a response.
            await self.report_error_async(ex, __file__)
            response = { "success": False, "error": "unable to process the image" }

        return response

    def status(self) -> JSON:
        """Return the base module status augmented with the plate count."""
        statusData = super().status()
        statusData["numItemsFound"] = self._num_items_found
        return statusData

    def update_statistics(self, response):
        """Accumulate the number of plates found from each successful response."""
        super().update_statistics(response)
        if "success" in response and response["success"] and "predictions" in response:
            predictions = response["predictions"]
            self._num_items_found += len(predictions)

    def selftest(self) -> JSON:
        """Sanity-check the PaddlePaddle installation.

        BUGFIX: parameter renamed from 'slf' to the conventional 'self', and
        the bare 'except:' narrowed to 'except Exception' so that
        KeyboardInterrupt/SystemExit are not swallowed.
        """
        try:
            import paddle
            paddle.utils.run_check()
            return { "success": True,  "message": "PaddlePaddle self test successful" }
        except Exception:
            return { "success": False, "message": "PaddlePaddle self test failed" }
# Script entry point: run the module's request-processing loop until the
# server shuts it down.
if __name__ == "__main__":
    ALPR_adapter().start_loop()