From 766fecbfc982ee4f04dfb179f393fb58b724050c Mon Sep 17 00:00:00 2001
From: PhilipMay
Date: Fri, 26 Jun 2020 21:29:03 +0200
Subject: [PATCH 1/2] Fix if statement

---
 test/benchmarks/conftest.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/benchmarks/conftest.py b/test/benchmarks/conftest.py
index d58e9ce55..d8f44ddbf 100644
--- a/test/benchmarks/conftest.py
+++ b/test/benchmarks/conftest.py
@@ -15,9 +15,9 @@ def onnx_adaptive_model_qa(use_gpu, num_processes):
             model_name_or_path, device="cpu", task_type="question_answering"
         )
         model.convert_to_onnx(onnx_model_export_path)
-
-    model = Inferencer.load(
-        onnx_model_export_path, task_type="question_answering", batch_size=1, num_processes=num_processes, gpu=use_gpu
-    )
+    else:
+        model = Inferencer.load(
+            onnx_model_export_path, task_type="question_answering", batch_size=1, num_processes=num_processes, gpu=use_gpu
+        )
 
     return model

From 5e4e042dd38d0df0e64931d0ca3dae0ca2c1cfe1 Mon Sep 17 00:00:00 2001
From: Tanay Soni
Date: Wed, 8 Jul 2020 17:16:02 +0200
Subject: [PATCH 2/2] Fix ONNX conversion in pytest fixture

---
 test/benchmarks/conftest.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/test/benchmarks/conftest.py b/test/benchmarks/conftest.py
index d8f44ddbf..16d7c3491 100644
--- a/test/benchmarks/conftest.py
+++ b/test/benchmarks/conftest.py
@@ -7,17 +7,18 @@
 
 
 @pytest.fixture(scope="session")
-def onnx_adaptive_model_qa(use_gpu, num_processes):
-    model_name_or_path = "deepset/bert-base-cased-squad2"
-    onnx_model_export_path = Path("benchmarks/onnx-export")
-    if not (onnx_model_export_path / "model.onnx").is_file():
+def onnx_adaptive_model_qa(use_gpu, num_processes, model_name_or_path="deepset/bert-base-cased-squad2"):
+    if (Path(model_name_or_path) / "model.onnx").is_file():  # load model directly if in ONNX format
+        onnx_model_path = model_name_or_path
+    else:  # convert to ONNX format
+        onnx_model_path = Path("benchmarks/onnx-export")
         model = AdaptiveModel.convert_from_transformers(
             model_name_or_path, device="cpu", task_type="question_answering"
         )
-        model.convert_to_onnx(onnx_model_export_path)
-    else:
-        model = Inferencer.load(
-            onnx_model_export_path, task_type="question_answering", batch_size=1, num_processes=num_processes, gpu=use_gpu
-        )
+        model.convert_to_onnx(onnx_model_path)
+
+    model = Inferencer.load(
+        onnx_model_path, task_type="question_answering", batch_size=1, num_processes=num_processes, gpu=use_gpu
+    )
 
     return model
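
Reviewer note: a minimal sketch of how the fixed fixture might be exercised from a
benchmark test, assuming FARM's question-answering input format
({"qas": [...], "context": ...}) and assuming the `use_gpu` and `num_processes`
fixture arguments are supplied elsewhere in the benchmark suite. The test file,
test function name, and sample question/context below are hypothetical.

    # test/benchmarks/test_onnx_qa.py (hypothetical file)
    def test_onnx_qa_smoke(onnx_adaptive_model_qa):
        # The fixture returns a FARM Inferencer backed by the exported ONNX model,
        # so it can run question-answering inference directly from QA dicts.
        qa_dicts = [
            {
                "qas": ["Who maintains FARM?"],                # hypothetical question
                "context": "FARM is maintained by deepset.",   # hypothetical context
            }
        ]
        results = onnx_adaptive_model_qa.inference_from_dicts(dicts=qa_dicts)
        assert len(results) > 0  # expect at least one prediction back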