馃 Unify engine run returning (#580)
## Describe your changes

This PR unifies the return values of `run_no_search` and `run_search`: both now return Olive footprints.

More discussion here: #572
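
Both the search and no-search paths now return a dict of accelerator spec to `Footprint`. A minimal sketch of inspecting that result (the config path below is a placeholder, the `olive_run` entry point is the one the example tests already use, and the node/metric access mirrors the updated `examples/test/utils.py::check_output`):

```python
import json

from olive.workflows import run as olive_run

# "my_workflow_config.json" is a placeholder for an existing Olive run config
with open("my_workflow_config.json") as f:
    olive_config = json.load(f)

# {accelerator_spec: Footprint}, for both run_search and run_no_search
footprints = olive_run(olive_config)
assert footprints, "the run failed for all accelerator specs"

for accelerator_spec, footprint in footprints.items():
    for model_id, node in footprint.nodes.items():
        if node.metrics:  # some nodes (e.g. the input model) may carry no metrics
            for metric_result in node.metrics.value.values():
                print(f"{accelerator_spec} {model_id}: {metric_result.value}")
```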

## Checklist before requesting a review
- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [ ] Update documents if necessary.
- [ ] Format your code by running `pre-commit run --all-files`
- [ ] Is this a user-facing change? If yes, give a description of this
change to be included in the release notes.

## (Optional) Issue link
trajepl committed Sep 19, 2023
1 parent 6f78065 commit 38bd1d6
Showing 17 changed files with 97 additions and 114 deletions.
4 changes: 2 additions & 2 deletions examples/test/test_bert_cuda_gpu.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config


@pytest.fixture(scope="module", autouse=True)
@@ -32,4 +32,4 @@ def test_bert(search_algorithm, execution_order, system, olive_json, enable_cuda
olive_config["passes"]["perf_tuning"]["config"]["enable_cuda_graph"] = enable_cuda_graph

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_bert_ptq_cpu.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config


@pytest.fixture(scope="module", autouse=True)
@@ -30,4 +30,4 @@ def test_bert(search_algorithm, execution_order, system, olive_json):
olive_config = patch_config(olive_json, search_algorithm, execution_order, system)

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_bert_ptq_cpu_aml.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_no_search_output, check_search_output, patch_config
from utils import check_output, patch_config


@pytest.fixture(scope="module", autouse=True)
@@ -38,4 +38,4 @@ def test_bert(olive_test_knob):

olive_config = patch_config(*olive_test_knob)
output = olive_run(olive_config)
check_no_search_output(output) if not olive_test_knob[1] else check_search_output(output)
check_output(output)
4 changes: 2 additions & 2 deletions examples/test/test_bert_ptq_cpu_docker.py
@@ -7,7 +7,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config


@pytest.fixture(scope="module", autouse=True)
@@ -33,4 +33,4 @@ def test_bert(search_algorithm, execution_order, system, olive_json):
olive_config = patch_config(olive_json, search_algorithm, execution_order, system)

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_cifar10_openvino_intel_hw.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output
from utils import check_output


@pytest.fixture(scope="module", autouse=True)
@@ -22,4 +22,4 @@ def test_cifar10():
import cifar10

metrics = cifar10.main()
check_search_output(metrics)
check_output(metrics)
4 changes: 2 additions & 2 deletions examples/test/test_resnet_ptq_cpu.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config

from olive.common.utils import retry_func, run_subprocess

@@ -37,4 +37,4 @@ def test_resnet(search_algorithm, execution_order, system, olive_json):
olive_config = patch_config(olive_json, search_algorithm, execution_order, system)

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_resnet_qat.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config

from olive.common.utils import retry_func, run_subprocess

@@ -38,4 +38,4 @@ def test_resnet(search_algorithm, execution_order, system, olive_json):
olive_config = patch_config(olive_json, search_algorithm, execution_order, system)

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_resnet_vitis_ai_ptq_cpu.py
@@ -6,7 +6,7 @@
from pathlib import Path

import pytest
from utils import check_search_output, patch_config
from utils import check_output, patch_config

from olive.common.utils import retry_func, run_subprocess

@@ -37,4 +37,4 @@ def test_resnet(search_algorithm, execution_order, system, olive_json):
olive_config = patch_config(olive_json, search_algorithm, execution_order, system)

footprint = olive_run(olive_config)
check_search_output(footprint)
check_output(footprint)
4 changes: 2 additions & 2 deletions examples/test/test_whisper.py
@@ -9,7 +9,7 @@
from pathlib import Path

import pytest
from utils import check_no_search_output
from utils import check_output


@pytest.fixture(scope="module", autouse=True)
@@ -43,7 +43,7 @@ def test_whisper(device_precision):

# test workflow
result = olive_run(olive_config)
check_no_search_output(result)
check_output(result)

# test transcription
from test_transcription import main as test_transcription
13 changes: 1 addition & 12 deletions examples/test/utils.py
@@ -6,7 +6,7 @@
import os


def check_search_output(footprints):
def check_output(footprints):
"""Check if the search output is valid."""
assert footprints, "footprints is empty. The search must have failed for all accelerator specs."
for footprint in footprints.values():
@@ -15,17 +15,6 @@ def check_search_output(footprints):
assert all([metric_result.value > 0 for metric_result in v.metrics.value.values()])


def check_no_search_output(outputs):
assert outputs, "outputs is empty. The run must have failed for all accelerator specs."
# k:v => accelerator_spec: pass_flow_output
for pass_flow_output in outputs.values():
# k:v => pass_flow: output
for output in pass_flow_output.values():
output_metrics = output["metrics"]
for item in output_metrics.values():
assert item.value > 0


def patch_config(config_json_path: str, search_algorithm: str, execution_order: str, system: str, is_gpu: bool = False):
"""Load the config json file and patch it with the given search algorithm, execution order and system."""
with open(config_json_path, "r") as fin:
71 changes: 25 additions & 46 deletions olive/engine/engine.py
@@ -325,7 +325,6 @@ def run(
output_dir.mkdir(parents=True, exist_ok=True)

outputs = {}
pf_footprints = {}

for accelerator_spec in self.accelerator_specs:
with self.create_managed_environment(accelerator_spec):
@@ -336,37 +335,21 @@
if run_result is None:
continue

if evaluate_input_model and not self.passes:
# for evaluate input model only, return the evaluation results
# TODO: need check whether the evaluation results are valid since it will only evaluate input model
# once and use the same evaluation results for all accelerators
outputs[accelerator_spec] = run_result
elif self.no_search:
output, model_ids = run_result
if output:
outputs[accelerator_spec] = output
pf_footprints[accelerator_spec] = self.footprints[accelerator_spec].get_footprints_by_model_ids(
model_ids
)
else:
outputs[accelerator_spec] = run_result
pf_footprints[accelerator_spec] = run_result

if not self.passes:
# no passes registered, return the evaluation results
return outputs
outputs[accelerator_spec] = run_result

for accelerator_spec in self.footprints.keys():
logger.info(f"Run history for {accelerator_spec}:")
run_history = self.footprints[accelerator_spec].summarize_run_history()
self.dump_run_history(run_history, output_dir / f"run_history_{accelerator_spec}.txt")

if packaging_config:
logger.info(f"Package top ranked {sum([len(f.nodes) for f in pf_footprints.values()])} models as artifacts")
if packaging_config and self.passes:
# TODO: should we support package input model?
# TODO: do you support packaging pytorch models?
logger.info(f"Package top ranked {sum([len(f.nodes) for f in outputs.values()])} models as artifacts")
generate_output_artifacts(
packaging_config,
self.footprints,
pf_footprints,
outputs,
output_dir,
)
else:
@@ -493,13 +476,13 @@ def run_no_search(
self.search_strategy.initialize(self.pass_flows_search_spaces, input_model_id, objective_dict)

iter_num = 0
flows_output = {}
output_model_ids = []
output_models = {}
while True:
iter_num += 1

# get the next step
next_step = self.search_strategy.next_step()

if iter_num == 1:
assert next_step is not None, "Search strategy returned None for the first step"
# if no more steps, break
@@ -518,16 +501,15 @@
logger.debug(f"Step no search with search point {next_step['search_point']} ...")

# run all the passes in the step
(
should_prune,
signal,
model_ids,
) = self._run_passes(next_step["passes"], model_config, model_id, data_root, accelerator_spec)
pass_flow = self.pass_flows[iter_num - 1]
should_prune, signal, model_ids = self._run_passes(
next_step["passes"], model_config, model_id, data_root, accelerator_spec
)

pass_flow = self.pass_flows[iter_num - 1]
if should_prune:
failed_pass = pass_flow[len(model_ids)]
logger.warning(f"Flow {pass_flow} is pruned due to failed or invalid config for pass '{failed_pass}'")
continue

# names of the output models of the passes
pass_output_names = [self.passes[pass_name]["output_name"] for pass_name, _ in next_step["passes"]]
@@ -547,7 +529,6 @@ def run_no_search(
pass_output_names[-1] = final_output_name

output_model_json = None
output = {}
for pass_output_name, pass_output_model_id in zip(pass_output_names, model_ids):
if not pass_output_name:
continue
@@ -558,23 +539,21 @@ def run_no_search(
overwrite=True,
cache_dir=self._config.cache_dir,
)
output_model_ids.append(pass_output_model_id)
output_models[pass_output_model_id] = output_model_json

# save the evaluation results to output_dir
if signal is not None:
results_path = output_dir_with_pf / f"{final_output_name}_metrics.json"
with open(results_path, "w") as f:
json.dump(signal.to_json(), f, indent=4)

if output_model_json and not should_prune:
# output_model_json is the last model only if the flow is not pruned
output["model"] = output_model_json
if signal is not None:
output["metrics"] = signal
else:
output = None
flows_output[tuple(pass_flow)] = output
return flows_output, output_model_ids
output_model_ids = list(output_models.keys())
fp_outputs = self.footprints[accelerator_spec].create_footprints_by_model_ids(output_model_ids)
# update the output model config
for model_id, model_config in output_models.items():
fp_outputs.nodes[model_id].model_config = model_config

return fp_outputs

def run_search(
self,
@@ -640,14 +619,14 @@ def run_search(

self.footprints[accelerator_spec].to_file(output_dir / f"{prefix_output_name}footprints.json")

return self.get_pareto_frontier_footprints(
return self.create_pareto_frontier_footprints(
accelerator_spec, output_model_num, objective_dict, output_dir, prefix_output_name
)

def get_pareto_frontier_footprints(
def create_pareto_frontier_footprints(
self, accelerator_spec, output_model_num, objective_dict, output_dir, prefix_output_name
):
pf_footprints = self.footprints[accelerator_spec].get_pareto_frontier()
pf_footprints = self.footprints[accelerator_spec].create_pareto_frontier()
if output_model_num is None or len(pf_footprints.nodes) <= output_model_num:
logger.info(f"Output all {len(pf_footprints.nodes)} models")
else:
@@ -959,7 +938,7 @@ def _run_passes(
)
if model_config in PRUNED_CONFIGS:
should_prune = True
logger.debug("Pruned")
logger.debug(f"Pruned for pass {pass_id}")
break
model_ids.append(model_id)

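For reference, a hedged sketch of consuming the unified engine output after this change: `footprints` stands for the `{accelerator_spec: Footprint}` dict returned by `Engine.run`/`olive_run` (as in the sketch in the description above), and `DEFAULT_CPU_ACCELERATOR` is imported the same way as in the updated AML test further down.

```python
from olive.hardware.accelerator import DEFAULT_CPU_ACCELERATOR

# `footprints` is the {accelerator_spec: Footprint} dict returned by Engine.run / olive_run
cpu_footprint = footprints[DEFAULT_CPU_ACCELERATOR]
for model_id, node in cpu_footprint.nodes.items():
    # run_no_search fills model_config on each saved output node
    print(model_id, node.model_config)
    if node.metrics:
        for metric_result in node.metrics.value.values():
            print(f"  {metric_result.value}")
```
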
13 changes: 8 additions & 5 deletions olive/engine/footprint.py
@@ -5,6 +5,7 @@

import logging
from collections import OrderedDict, defaultdict, namedtuple
from copy import deepcopy
from typing import DefaultDict, Dict

from olive.common.config_utils import ConfigBase, config_json_dumps, config_json_loads
@@ -153,20 +154,22 @@ def mark_pareto_frontier(self):
self.nodes[k].is_pareto_frontier = cmp_flag
self.is_marked_pareto_frontier = True

def get_footprints_by_model_ids(self, model_ids):
def create_footprints_by_model_ids(self, model_ids):
nodes = OrderedDict()
for model_id in model_ids:
nodes[model_id] = self.nodes[model_id]
return Footprint(nodes=nodes, objective_dict=self.objective_dict, is_marked_pareto_frontier=True)
nodes[model_id] = deepcopy(self.nodes[model_id])
return Footprint(nodes=nodes, objective_dict=deepcopy(self.objective_dict))

def get_pareto_frontier(self):
def create_pareto_frontier(self):
self.mark_pareto_frontier()
rls = {k: v for k, v in self.nodes.items() if v.is_pareto_frontier}
for _, v in rls.items():
logger.info(f"pareto frontier points: {v.model_id} \n{v.metrics.value}")

# restructure the pareto frontier points to instance of Footprints node for further analysis
return Footprint(nodes=rls, objective_dict=self.objective_dict, is_marked_pareto_frontier=True)
return Footprint(
nodes=deepcopy(rls), objective_dict=deepcopy(self.objective_dict), is_marked_pareto_frontier=True
)

def update_nodes(self, nodes):
node_dict = OrderedDict()
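
A small sketch of the new copy semantics (the `footprint` object and the model config value are placeholders): because `create_footprints_by_model_ids` and `create_pareto_frontier` now deep-copy the selected nodes and the objective dict, callers such as `run_no_search` can overwrite `model_config` on the returned nodes without mutating the engine's master footprint.

```python
# `footprint` stands for any existing Footprint, e.g. one value of the dict returned by olive_run
some_id = next(iter(footprint.nodes))

subset = footprint.create_footprints_by_model_ids([some_id])
subset.nodes[some_id].model_config = {"type": "ONNXModel"}  # placeholder edit, applied to the copy
# footprint.nodes[some_id] is untouched because the nodes were deep-copied

pareto = footprint.create_pareto_frontier()  # also an independent, Pareto-marked Footprint
```
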
16 changes: 9 additions & 7 deletions test/multiple_ep/test_aml_system.py
@@ -11,7 +11,7 @@
from olive.engine import Engine
from olive.evaluator.olive_evaluator import OliveEvaluatorConfig
from olive.hardware import Device
from olive.hardware.accelerator import AcceleratorSpec
from olive.hardware.accelerator import DEFAULT_CPU_ACCELERATOR, AcceleratorSpec
from olive.model import ModelConfig
from olive.passes.onnx import OrtPerfTuning

@@ -58,9 +58,11 @@ def test_run_pass_evaluate(self):
engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
engine.register(OrtPerfTuning)
output = engine.run(self.input_model_config, output_dir=output_dir)
cpu_res = output[AcceleratorSpec(accelerator_type=Device.CPU, execution_provider="CPUExecutionProvider")]
openvino_res = output[
AcceleratorSpec(accelerator_type=Device.CPU, execution_provider="OpenVINOExecutionProvider")
]
assert cpu_res[tuple(engine.pass_flows[0])]["metrics"]["latency-avg"]
assert openvino_res[tuple(engine.pass_flows[0])]["metrics"]["latency-avg"]
cpu_res = list(output[DEFAULT_CPU_ACCELERATOR].nodes.values())[0]
openvino_res = list(
output[
AcceleratorSpec(accelerator_type=Device.CPU, execution_provider="OpenVINOExecutionProvider")
].nodes.values()
)[0]
assert cpu_res.metrics.value.__root__
assert openvino_res.metrics.value.__root__