onnxruntime 1.16 support #584

Merged · 10 commits · Sep 20, 2023
disable vitis test for ort 1.16.0
guotuofeng committed Sep 20, 2023
commit 200a2329b3284740629ff4ec8d695bffd65d46c4
5 changes: 5 additions & 0 deletions examples/test/test_resnet_vitis_ai_ptq_cpu.py
@@ -6,6 +6,8 @@
from pathlib import Path

import pytest
+from onnxruntime import __version__ as OrtVersion
+from packaging import version
from utils import check_output, patch_config

from olive.common.utils import retry_func, run_subprocess
@@ -30,6 +32,9 @@ def setup():
@pytest.mark.parametrize("execution_order", ["pass-by-pass"])
@pytest.mark.parametrize("system", ["local_system", "aml_system"])
@pytest.mark.parametrize("olive_json", ["resnet_vitis_ai_ptq_cpu.json"])
+@pytest.mark.skipif(
+    version.parse(OrtVersion) >= version.parse("1.16.0"), reason="VitisAIQuantization is not supported in ORT 1.16.0 with TensorsData"
+)
def test_resnet(search_algorithm, execution_order, system, olive_json):
# TODO: add gpu e2e test
from olive.workflows import run as olive_run
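Note on the skip condition: onnxruntime.__version__ is a plain string, so it has to go through version.parse() before the comparison; comparing the raw string against a packaging Version object raises TypeError, and comparing strings directly mis-orders releases ("1.9.0" sorts after "1.16.0"). A minimal, self-contained sketch of the pattern, with skip_on_ort_116 and test_placeholder as illustrative names only:

    import pytest
    from onnxruntime import __version__ as OrtVersion
    from packaging import version

    # Parse both sides: version.parse("1.9.0") < version.parse("1.16.0"),
    # while the raw strings compare the other way around ("9" > "1").
    skip_on_ort_116 = pytest.mark.skipif(
        version.parse(OrtVersion) >= version.parse("1.16.0"),
        reason="VitisAIQuantization is not supported in ORT 1.16.0 with TensorsData",
    )

    @skip_on_ort_116
    def test_placeholder():
        assert True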
4 changes: 3 additions & 1 deletion olive/passes/onnx/quantization.py
@@ -451,7 +451,9 @@ def _quant_preprocess(self, model: ONNXModel, output_model_path: Union[str, Path
# there are some problems with the path to where the external data is saved
# need to find out why before enabling this

logger.warning(f"Failed to run quantization preprocessing with error of {e}. Using original model.")
logger.warning(
f"Failed to run quantization preprocessing with error of {e}. Using original model.", exc_info=True
)
# save original model to output path
onnx_model = onnx.load(model.model_path)
model_proto_to_file(
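The only behavioral change in this hunk is exc_info=True, which tells the logging module to append the active exception's traceback to the warning rather than just the formatted message, so the root cause is preserved even though the exception is swallowed. A standalone illustration (the try/except here is a mock, not the Olive code):

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    try:
        raise ValueError("mock preprocessing failure")  # stand-in for the real failure
    except Exception as e:
        # With exc_info=True the full traceback follows the message in the log output.
        logger.warning("Failed to run quantization preprocessing with error of %s. Using original model.", e, exc_info=True)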
6 changes: 6 additions & 0 deletions test/unit_test/passes/vitis_ai/test_vitis_ai_quantization.py
@@ -7,7 +7,10 @@
from test.unit_test.utils import get_onnx_model

import numpy as np
+import pytest
+from onnxruntime import __version__ as OrtVersion
from onnxruntime.quantization.calibrate import CalibrationDataReader
+from packaging import version

from olive.passes.olive_pass import create_pass_from_dict
from olive.passes.onnx.vitis_ai_quantization import VitisAIQuantization
@@ -34,6 +37,9 @@ def dummy_calibration_reader(data_dir=None, batch_size=1, *args, **kwargs):
return RandomDataReader()


+@pytest.mark.skipif(
+    version.parse(OrtVersion) >= version.parse("1.16.0"), reason="VitisAIQuantization is not supported in ORT 1.16.0 with TensorsData"
+)
def test_vitis_ai_quantization_pass(tmp_path):
# setup
input_model = get_onnx_model()
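RandomDataReader is defined outside the lines shown here; for orientation only, a plausible minimal CalibrationDataReader that feeds random batches might look like the sketch below (the input name, shape, and batch count are assumptions, not the test's actual values):

    import numpy as np
    from onnxruntime.quantization.calibrate import CalibrationDataReader

    class RandomDataReader(CalibrationDataReader):
        """Yields a fixed number of random input batches for ORT calibration."""

        def __init__(self, input_name="input", shape=(1, 1), num_batches=16):
            # Assumed values; they must match the model's actual input signature.
            self._batches = iter(
                [{input_name: np.random.rand(*shape).astype(np.float32)} for _ in range(num_batches)]
            )

        def get_next(self):
            # The ORT calibrator calls get_next() until it returns None.
            return next(self._batches, None)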