This repository has been archived by the owner on Oct 21, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 12
/
test_model.py
49 lines (39 loc) · 1.56 KB
/
test_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""Run an ONNX model on the Micron DLA and, in the default profile mode,
compare its output against onnxruntime (CPU) as a reference.

Usage: test_model.py [-p {0,1,2}] model input_shape
  model       path to an ONNX model file
  input_shape input dimensions as WxHxC, e.g. 224x224x3
"""
import sys
sys.path.insert(0, '../../')
import microndla
import torch
import torch.onnx
import numpy as np
import onnxruntime as rt
from argparse import ArgumentParser

# argument checking
parser = ArgumentParser(description="Run a ONNX model")
_ = parser.add_argument
_('-p','--profile', type=int, default=0, help='Profile mode: 0: compare accuracy (default) 1: entire model 2: each layer in model')
_('model', type=str, default='', help='model')
_('input_shape', type=str, default='', help='input shape WxHxC')
args = parser.parse_args()

torch.manual_seed(0)  # deterministic random input so runs are reproducible

# Parse "WxHxC" into [W, H, C]. Fail early with a usage message instead of
# crashing later with an IndexError when fewer than three dims are supplied.
res = [int(i) for i in args.input_shape.split('x') if i.isdigit()]
if len(res) != 3:
    parser.error('input_shape must be three integers in WxHxC form, e.g. 224x224x3')

# Build a random input tensor in NCHW layout: batch=1, C, H, W.
image = torch.randn(1, res[2], res[1], res[0], dtype=torch.float32)

sf = microndla.MDLA()
if args.profile >= 1:
    sf.SetFlag('debug', 'b')  # debug options
if args.profile == 2:
    sf.SetFlag('options', 'Ls')  # profile all layers in the model

# Compile and run on MDLA; the accelerator takes a contiguous numpy buffer.
sf.Compile(args.model)
in_1 = np.ascontiguousarray(image)
result = sf.Run(in_1)

if args.profile == 0:
    # Run the same input through ONNX runtime and compare outputs.
    sess = rt.InferenceSession(args.model)
    input_name = sess.get_inputs()[0].name
    result_pyt = sess.run(None, {input_name: in_1})
    if isinstance(result_pyt, list):  # sess.run returns a list of output arrays
        result_pyt = result_pyt[0].reshape(-1)
    # Report errors relative to the reference output's largest magnitude,
    # as a percentage; hoist the shared denominator.
    ref_scale = np.absolute(result_pyt).max()
    error_mean = (np.absolute(result - result_pyt).mean() / ref_scale) * 100.0
    error_max = (np.absolute(result - result_pyt).max() / ref_scale) * 100.0
    print('\x1b[32mMean/max error compared to pytorch are {:.3f}/{:.3f} %\x1b[0m'.format(error_mean, error_max))