classify-video.py
#!/usr/bin/env python
import device_patches # Device specific patches for Jetson Nano (needs to be before importing cv2)
import cv2
import os
import time
import sys, getopt
import numpy as np
from edge_impulse_linux.image import ImageImpulseRunner
runner = None
# if you don't want to see a video preview, set this to False
show_camera = True
if (sys.platform == 'linux' and not os.environ.get('DISPLAY')):
    show_camera = False
def help():
    print('python classify-video.py <path_to_model.eim> <path_to_video.mp4>')
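# Example invocation (file names below are placeholders):
#   python classify-video.py modelfile.eim input-video.mp4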
def main(argv):
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) != 2:
        help()
        sys.exit(2)
    model = args[0]

    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            # model_info = runner.init(debug=True) # to get debug print out
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']

            vidcap = cv2.VideoCapture(args[1])
            sec = 0
            start_time = time.time()
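            # getFrame seeks the capture to `sec` seconds and reads a single frame;
            # if no frame can be read (end of video or a read error) the script exits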
            def getFrame(sec):
                vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
                hasFrames, image = vidcap.read()
                if hasFrames:
                    return image
                else:
                    print('Failed to load frame', args[1])
                    exit(1)
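            # sample the video in (approximate) real time: each iteration grabs the frame at the
            # current wall-clock offset, so slower inference simply skips more frames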
            img = getFrame(sec)

            while img.size != 0:
                # imread returns images in BGR format, so we need to convert to RGB
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                # get_features_from_image also takes crop direction arguments in case you don't have square images
                features, cropped = runner.get_features_from_image(img)

                # the image will be resized and cropped, save a copy of the picture here
                # so you can see what's being passed into the classifier
                cv2.imwrite('debug.jpg', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))

                res = runner.classify(features)

                if "classification" in res["result"].keys():
                    print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                    for label in labels:
                        score = res['result']['classification'][label]
                        print('%s: %.2f\t' % (label, score), end='')
                    print('', flush=True)
                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                    for bb in res["result"]["bounding_boxes"]:
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        img = cv2.rectangle(cropped, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)

                if "visual_anomaly_grid" in res["result"].keys():
                    print('Found %d visual anomalies (%d ms.)' % (len(res["result"]["visual_anomaly_grid"]), res['timing']['dsp'] + res['timing']['classification']))
                    for grid_cell in res["result"]["visual_anomaly_grid"]:
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (grid_cell['label'], grid_cell['value'], grid_cell['x'], grid_cell['y'], grid_cell['width'], grid_cell['height']))
                        img = cv2.rectangle(cropped, (grid_cell['x'], grid_cell['y']), (grid_cell['x'] + grid_cell['width'], grid_cell['y'] + grid_cell['height']), (255, 125, 0), 1)

                if (show_camera):
                    cv2.imshow('edgeimpulse', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break

                sec = time.time() - start_time
                sec = round(sec, 2)
                print("Getting frame at: %.2f sec" % sec)
                img = getFrame(sec)
        finally:
            if (runner):
                runner.stop()

if __name__ == "__main__":
    main(sys.argv[1:])