From d80d80a8287df7e5d884c482e6d38297043bae57 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 8 Dec 2021 13:19:43 +0100
Subject: [PATCH] Add ONNX inference providers

Fix for https://github.com/ultralytics/yolov5/issues/5916
---
 models/common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/common.py b/models/common.py
index ec5fbfaec4ca..c18fe10d4089 100644
--- a/models/common.py
+++ b/models/common.py
@@ -322,7 +322,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
             check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            session = onnxruntime.InferenceSession(w, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
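
Note on the change: onnxruntime 1.9 and later require execution providers to
be specified explicitly when a build exposes more than the CPU provider, so
the old `InferenceSession(w, None)` call fails under onnxruntime-gpu (the
error reported in issue 5916). Below is a minimal standalone sketch of the
provider-selection pattern this patch applies inside DetectMultiBackend; the
'yolov5s.onnx' path and the 1x3x640x640 dummy input are illustrative
assumptions, not part of the diff.

    import numpy as np
    import onnxruntime

    w = 'yolov5s.onnx'  # assumed local YOLOv5 ONNX export

    # Prefer the CUDA execution provider when this onnxruntime build exposes
    # it (onnxruntime-gpu), otherwise fall back to CPU.
    available = onnxruntime.get_available_providers()
    providers = (['CUDAExecutionProvider', 'CPUExecutionProvider']
                 if 'CUDAExecutionProvider' in available
                 else ['CPUExecutionProvider'])
    session = onnxruntime.InferenceSession(w, providers=providers)

    # Dummy forward pass to confirm the session runs end to end.
    input_name = session.get_inputs()[0].name
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed input shape
    y = session.run(None, {input_name: im})
    print(providers, y[0].shape)

The patch itself passes both providers unconditionally. Depending on the
onnxruntime version, requesting a provider the installed build does not
support may be silently ignored or rejected with an error, which is why the
explicit availability check in the sketch above can be the safer pattern.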