forked from akindofyoga/tflite-inf
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_pipeline_both_tfl_time.py
119 lines (88 loc) · 3.87 KB
/
test_pipeline_both_tfl_time.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import logging
import os
import time
import numpy as np
from tflite_support.task import core
from tflite_support.task import processor
from tflite_support.task import vision
import cv2
# Path to the TFLite classification model (presumably a ResNet-50 export,
# judging by the name — confirm).
CLASSIFIER_FILENAME = '/stirling_voc/r50.tflite'
# Path to the TFLite object-detection model.
# NOTE(review): 'sitrling' looks like a typo for 'stirling' — verify the file
# on disk really has this name before "fixing" the string.
DETECTOR_NAME = '/stirling_voc/sitrling_all_classes.tflite'
# Root of the test set: one sub-directory per ground-truth label, each
# containing that label's images (see main()).
TEST_IMAGES = '/stirling_voc/test_images'
# Minimum confidence score applied to both the detector and the classifier.
THRESHOLD = 0.4
class Pipeline:
    """Two-stage inference pipeline: object detection followed by
    classification of the detected crop, both via the TFLite Task Library.

    Loads both models once in __init__; inf() runs the full pipeline on a
    single image file.
    """

    def __init__(self):
        # Classifier stage: keep only the single best class at or above
        # THRESHOLD confidence.
        base_options = core.BaseOptions(
            file_name=CLASSIFIER_FILENAME, use_coral=False, num_threads=8)
        classification_options = processor.ClassificationOptions(
            max_results=1, score_threshold=THRESHOLD)
        options = vision.ImageClassifierOptions(
            base_options=base_options, classification_options=classification_options)
        self.classifier = vision.ImageClassifier.create_from_options(options)

        # Detector stage: same policy — at most one detection above THRESHOLD.
        base_options = core.BaseOptions(
            file_name=DETECTOR_NAME, use_coral=False, num_threads=8)
        detection_options = processor.DetectionOptions(
            max_results=1, score_threshold=THRESHOLD)
        options = vision.ObjectDetectorOptions(
            base_options=base_options, detection_options=detection_options)
        self.detector = vision.ObjectDetector.create_from_options(options)

    def inf(self, img_path, correct_label):
        """Detect, crop, classify the image at img_path.

        Returns True iff the detector finds a valid box AND the classifier's
        top label for the crop equals correct_label; False on no detection,
        a degenerate/out-of-frame box, or no classification result.
        """
        bgr_image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        input_tensor = vision.TensorImage.create_from_array(rgb_image)
        # Run object detection estimation using the model.
        detection_result = self.detector.detect(input_tensor)
        if len(detection_result.detections) == 0:
            return False
        # max_results=1 above guarantees at most one detection.
        assert len(detection_result.detections) == 1
        detection = detection_result.detections[0]
        category = detection.categories[0]
        # NOTE(review): strict '>' will trip if a score equals THRESHOLD
        # exactly (the detector filters with >=) — confirm intended.
        assert category.score > THRESHOLD
        bbox = detection.bounding_box
        left = bbox.origin_x
        right = bbox.origin_x + bbox.width
        top = bbox.origin_y
        bottom = bbox.origin_y + bbox.height
        # Validate the box BEFORE slicing: the original sliced first, and a
        # negative origin would silently wrap via Python negative indexing
        # and build a bogus crop before being discarded.
        if (top < 0) or (left < 0):
            return False
        if (bbox.width < 1) or (bbox.height < 1):
            return False
        bgr_crop = bgr_image[top:bottom, left:right]
        # Converting to JPEG and back again raises performance
        _, jpeg_crop = cv2.imencode(".jpg", bgr_crop)
        bgr_crop = cv2.imdecode(jpeg_crop, cv2.IMREAD_COLOR)
        rgb_crop = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2RGB)
        tensor_image = vision.TensorImage.create_from_array(rgb_crop)
        categories = self.classifier.classify(tensor_image)
        if len(categories.classifications[0].categories) == 0:
            return False
        # max_results=1 on the classifier guarantees at most one category.
        assert len(categories.classifications[0].categories) == 1
        category = categories.classifications[0].categories[0]
        pred_label_name = category.category_name
        return (pred_label_name == correct_label)
def main():
    """Walk TEST_IMAGES (one sub-directory per ground-truth label), run the
    pipeline on every image, and print cumulative accuracy counts plus wall
    time for each batch of 200 images.
    """
    good = 0
    bad = 0
    count = 0
    pipeline = Pipeline()
    start = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    for correct_label in os.listdir(TEST_IMAGES):
        img_dir = os.path.join(TEST_IMAGES, correct_label)
        for img_name in os.listdir(img_dir):
            img_path = os.path.join(img_dir, img_name)
            if pipeline.inf(img_path, correct_label):
                good += 1
            else:
                bad += 1
            # Increment BEFORE the batch check so every timed window covers
            # exactly 200 images. (The original checked the stale count and
            # incremented afterwards, so the first window timed 201 images.)
            count += 1
            if count == 200:
                end = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
                print('time for 200 in ns', (end - start))
                print('good:', good)
                print('bad:', bad)
                count = 0
                start = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    # Final cumulative totals (includes any partial last batch).
    print('good:', good)
    print('bad:', bad)


if __name__ == '__main__':
    main()