-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
executable file
·119 lines (92 loc) · 4.24 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
"""
Use object detection to detect and read automotive license plates in the frame in realtime.
To change the computer vision model, follow this guide:
https://docs.alwaysai.co/application_development/application_configuration.html#change-the-computer-vision-model
To change the engine and accelerator, follow this guide:
https://docs.alwaysai.co/application_development/application_configuration.html#change-the-engine-and-accelerator
"""
# NOTE: the docstring must precede all other statements to be bound to
# __doc__; it previously sat below the imports and was a plain no-op string.
import os
import time

import easyocr

import edgeiq
def main():
    """Detect vehicles and license plates in local videos and OCR the plates.

    Runs a MobileNet-SSD detector over every .mp4 under ./video/, assigns
    stable IDs to detections with a centroid tracker, crops each
    license-plate box, and reads its text with EasyOCR.  Annotated frames
    and per-object text are pushed to the alwaysAI streamer.

    Raises:
        Whatever edgeiq raises on model/stream failure; cleanup still runs
        via the ``finally`` block.
    """
    # Load the object detection model
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/vehicle_license_mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    # Load the tracker to reduce tracking burden
    tracker = edgeiq.CentroidTracker()
    fps = edgeiq.FPS()

    # Load the OCR reader: English only, CPU, offline model files bundled
    # with the app (no download at startup).
    reader = easyocr.Reader(
        ['en'],
        gpu=False,
        download_enabled=False,
        model_storage_directory='easy_ocr_model',
        user_network_directory=os.getcwd())

    # Console output
    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # BUGFIX: streamer must pre-exist so the finally block cannot hit a
    # NameError when setup (or list_files) fails before assignment.
    streamer = None
    try:
        # Prepare to run detection on all the .mp4 videos in the video/ subfolder
        video_paths = edgeiq.list_files(
            base_path="./video/", valid_exts=".mp4")
        streamer = edgeiq.Streamer().setup()
        for video_path in video_paths:
            print(f'Playing video file: {video_path}')
            with edgeiq.FileVideoStream(video_path) \
                    as video_stream:
                # Give the file stream a moment to buffer frames
                # (original comment said "Webcam", but this is a file stream).
                time.sleep(2.0)
                fps.start()

                # loop detection
                while video_stream.more():
                    frame = video_stream.read()
                    results = obj_detect.detect_objects(
                        frame, confidence_level=.5)
                    frame = edgeiq.markup_image(
                        frame, results.predictions, colors=obj_detect.colors)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                    text.append("Objects:")

                    # Track detections so each object keeps a stable ID
                    # across frames.
                    objects = tracker.update(results.predictions)
                    predictions = []
                    for (object_id, prediction) in objects.items():
                        # BUGFIX: 'predictions' was never populated, so the
                        # tracked-object markup below was always a no-op.
                        predictions.append(prediction)
                        text.append("{}_{}: {:2.2f}%".format(
                            prediction.label,
                            object_id,
                            prediction.confidence * 100))
                        if prediction.label == "license_plate":
                            # Crop the plate region and run it through OCR.
                            license_image = edgeiq.cutout_image(
                                frame, prediction.box)
                            try:
                                output = reader.readtext(
                                    license_image, detail=0)
                                print(
                                    f'{prediction.label}_{object_id}: {output}'
                                )
                                text.append('{}_{} reads: {}'.format(
                                    prediction.label, object_id, output))
                            except Exception as e:
                                # OCR failures are non-fatal; log and keep
                                # streaming the remaining frames.
                                print(
                                    f'app.py: Error processing image through OCR lib: ERROR: {e}')

                    # either way, use 'predictions' to mark up the image and update text
                    frame = edgeiq.markup_image(
                        frame, predictions, show_labels=True,
                        show_confidences=False, colors=obj_detect.colors)

                    streamer.send_data(frame, text)
                    fps.update()

                    if streamer.check_exit():
                        break
    finally:
        fps.stop()
        # Streamer may never have been created if startup failed early.
        if streamer is not None:
            streamer.close()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
# Run the detection pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()