From 79174822a5916e8a7d29d48a1840b056b3b57c19 Mon Sep 17 00:00:00 2001
From: nik-mosaic <101217697+nik-mosaic@users.noreply.github.com>
Date: Wed, 20 Sep 2023 17:40:45 -0700
Subject: [PATCH] Add providers param to ONNX Session in tests (#2553)

* add providers arg to ONNX Session in tests

* Add providers arg to all ort.InferenceSession calls
---
 examples/exporting_for_inference.ipynb | 4 ++--
 tests/algorithms/test_torch_export.py  | 2 +-
 tests/test_full_nlp.py                 | 3 ++-
 tests/utils/test_inference.py          | 6 +++---
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/examples/exporting_for_inference.ipynb b/examples/exporting_for_inference.ipynb
index 0bb200d275..e65814ed6b 100644
--- a/examples/exporting_for_inference.ipynb
+++ b/examples/exporting_for_inference.ipynb
@@ -408,7 +408,7 @@
     "import numpy as np\n",
     "\n",
     "# run inference\n",
-    "ort_session = ort.InferenceSession(model_save_path)\n",
+    "ort_session = ort.InferenceSession(model_save_path, providers=['CPUExecutionProvider'])\n",
     "outputs = ort_session.run(\n",
     "    None,\n",
     "    {'input': input[0].numpy()})\n",
@@ -513,7 +513,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "ort_session = ort.InferenceSession(model_save_path)\n",
+    "ort_session = ort.InferenceSession(model_save_path, providers=['CPUExecutionProvider'])\n",
     "new_outputs = ort_session.run(\n",
     "    None,\n",
     "    {'input': input[0].numpy()},\n",
diff --git a/tests/algorithms/test_torch_export.py b/tests/algorithms/test_torch_export.py
index 8525e2c9f8..8e28765a62 100644
--- a/tests/algorithms/test_torch_export.py
+++ b/tests/algorithms/test_torch_export.py
@@ -169,7 +169,7 @@ def test_surgery_onnx(
     onnx.checker.check_model(onnx_model)  # type: ignore (third-party)
 
     # run inference
-    ort_session = ort.InferenceSession(onnx_path)
+    ort_session = ort.InferenceSession(onnx_path, providers=['CPUExecutionProvider'])
     outputs = ort_session.run(
         None,
         {'input': input[0].numpy()},
diff --git a/tests/test_full_nlp.py b/tests/test_full_nlp.py
index 6608c509c4..5af0d1d82f 100644
--- a/tests/test_full_nlp.py
+++ b/tests/test_full_nlp.py
@@ -172,7 +172,8 @@ def inference_test_helper(finetuning_output_path, rud, finetuning_model, algorit
     ort = pytest.importorskip('onnxruntime')
     loaded_inference_model = onnx.load(str(tmp_path / 'inference_checkpoints' / 'exported_model.onnx'))
     onnx.checker.check_model(loaded_inference_model)
-    ort_session = ort.InferenceSession(str(tmp_path / 'inference_checkpoints' / 'exported_model.onnx'))
+    ort_session = ort.InferenceSession(str(tmp_path / 'inference_checkpoints' / 'exported_model.onnx'),
+                                       providers=['CPUExecutionProvider'])
 
     for key, value in copied_batch.items():
         copied_batch[key] = value.numpy()
diff --git a/tests/utils/test_inference.py b/tests/utils/test_inference.py
index ffae6c71ca..2ecf483b9b 100644
--- a/tests/utils/test_inference.py
+++ b/tests/utils/test_inference.py
@@ -155,7 +155,7 @@ def test_huggingface_export_for_inference_onnx(onnx_opset_version, tiny_bert_con
 
     onnx.checker.check_model(loaded_model)
 
-    ort_session = ort.InferenceSession(save_path)
+    ort_session = ort.InferenceSession(save_path, providers=['CPUExecutionProvider'])
 
     for key, value in sample_input.items():
         sample_input[key] = cpu_device.tensor_to_device(value).numpy()
@@ -217,7 +217,7 @@ def test_export_for_inference_onnx(model_cls, sample_input, onnx_opset_version,
     loaded_model = onnx.load(save_path)
 
     onnx.checker.check_model(loaded_model)
-    ort_session = ort.InferenceSession(save_path)
+    ort_session = ort.InferenceSession(save_path, providers=['CPUExecutionProvider'])
     loaded_model_out = ort_session.run(
         None,
         {'input': cpu_device.tensor_to_device(sample_input[0]).numpy()},
@@ -355,7 +355,7 @@ def test_export_for_inference_onnx_ddp(model_cls, sample_input, onnx_opset_versi
     loaded_model = onnx.load(save_path)
 
     onnx.checker.check_model(loaded_model)
-    ort_session = ort.InferenceSession(save_path)
+    ort_session = ort.InferenceSession(save_path, providers=['CPUExecutionProvider'])
     loaded_model_out = ort_session.run(
         None,
         {'input': sample_input[0].numpy()},
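
Note, for context only: since onnxruntime 1.9, constructing an InferenceSession without an explicit providers list raises a ValueError when the installed build enables more than one execution provider (e.g. onnxruntime-gpu), which is what these changes guard against. Below is a minimal standalone sketch of the call pattern the patch standardizes on; 'model.onnx' and the input shape are illustrative placeholders, not values from the patch.

# Sketch of the InferenceSession call pattern used throughout the patch.
# Assumes onnxruntime and numpy are installed and a hypothetical
# 'model.onnx' with a single tensor named 'input' exists locally.
import numpy as np
import onnxruntime as ort

# Pinning the CPU provider keeps session construction working even when
# a GPU-enabled onnxruntime build is installed (ORT 1.9+ requires an
# explicit providers list in that case).
ort_session = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])

# Run with a dummy input; the name 'input' mirrors the exported test
# models above, and the shape here is only a placeholder.
outputs = ort_session.run(None, {'input': np.zeros((1, 3, 224, 224), dtype=np.float32)})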