
Commit

Test with a model

daniil-lyakhov committed May 11, 2023
1 parent acb81ea commit 7981448
Showing 6 changed files with 96 additions and 7 deletions.
63 changes: 62 additions & 1 deletion tests/common/test_statistics_aggregator.py
@@ -12,7 +12,8 @@
 from abc import abstractmethod
 from dataclasses import dataclass
 from enum import Enum
-from typing import Any, Type, Union
+from itertools import product
+from typing import Any, List, Type, Union
 
 import numpy as np
 import pytest
@@ -24,6 +25,9 @@
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.tensor_statistics.statistic_point import StatisticPoint
 from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
+from nncf.experimental.common.tensor_statistics.collectors import NoopAggregator
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend
 from nncf.quantization.algorithms.fast_bias_correction.backend import FastBiasCorrectionAlgoBackend
 from nncf.quantization.algorithms.min_max.backend import MinMaxAlgoBackend
@@ -34,6 +38,7 @@
 from nncf.quantization.range_estimator import StatisticsType
 
 
+# pylint: disable=too-many-public-methods
 class TemplateTestStatisticsAggregator:
     @abstractmethod
     def get_min_max_algo_backend_cls(self) -> Type[MinMaxAlgoBackend]:
@@ -95,6 +100,10 @@ def inplace_statistics(self) -> bool:
     def is_backend_support_custom_estimators(self) -> bool:
         pass
 
+    @abstractmethod
+    def reducers_map(self) -> List[TensorReducerBase]:
+        pass
+
     @pytest.fixture
     def dataset_values(self):
         return [{"max": 1, "min": -10}, {"max": 0.1, "min": -1}, {"max": 128, "min": -128}]
@@ -799,3 +808,55 @@ def test_statistic_merging(self, test_params, key, dataset_samples, inplace_stat
         if isinstance(ref[0], np.ndarray):
             assert stat.min_values.shape == ref[0].shape
             assert stat.max_values.shape == ref[1].shape
+
+    @pytest.mark.parametrize(
+        "statistics_type",
+        [
+            StatisticsType.MIN,
+            StatisticsType.MAX,
+            StatisticsType.ABS_MAX,
+            StatisticsType.MEAN,
+            StatisticsType.QUANTILE,
+            StatisticsType.ABS_QUANTILE,
+            "batch_mean",
+            "mean_per_ch",
+        ],
+    )
+    def test_same_collectors_different_attrs_dont_merge(self, statistics_type, test_params, dataset_samples):
+        params = test_params["test_statistic_merging"]["split_concat"]
+        model = params["model"](dataset_samples)
+        params = {}
+        if statistics_type in [StatisticsType.MIN, StatisticsType.MAX, StatisticsType.ABS_MAX, StatisticsType.MEAN]:
+            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["inplace"] = [False, True]
+        elif statistics_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]:
+            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["quantile"] = [[0.01, 0.99], [0.001, 0.999]]
+        elif statistics_type == "batch_mean":
+            params["inplace"] = [False, True]
+        elif statistics_type == "mean_per_ch":
+            params["inplace"] = [False, True]
+            params["channel_dim"] = [1, 2]
+
+        def product_dict(**kwargs):
+            keys = kwargs.keys()
+            for instance in product(*kwargs.values()):
+                yield dict(zip(keys, instance))
+
+        tensor_collector = TensorCollector()
+        statistics_points = StatisticPointsContainer()
+        target_point_cls = self.get_target_point_cls()
+        target_point_args = (TargetType.POST_LAYER_OPERATION, "split", 0)
+        for params_ in product_dict(**params):
+            reducer = self.reducers_map()[statistics_type](**params_)
+            aggregator = NoopAggregator(1)
+            tensor_collector.register_statistic_branch(str(params_), reducer, aggregator)
+            target_point = target_point_cls(*target_point_args)
+            stat_point = StatisticPoint(target_point, tensor_collector, "TEST")
+            statistics_points.add_statistic_point(stat_point)
+
+        dataset = self.get_dataset(dataset_samples)
+        statistics_aggregator = self.get_statistics_aggregator(dataset)
+        statistics_aggregator.register_statistic_points(statistics_points)
+        # Run statistics collection to check that the output names match the reducer names
+        statistics_aggregator.collect_statistics(model)
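
The product_dict helper above drives the whole test: each yielded dict is one unique attribute combination, and each combination registers its own reducer branch under the key str(params_). A minimal standalone sketch of the pattern, with a hypothetical attribute grid mirroring the "mean_per_ch" branch (not the test's actual fixtures):

from itertools import product

def product_dict(**kwargs):
    # Cartesian product over the value lists; yields one kwargs dict per combination.
    keys = kwargs.keys()
    for instance in product(*kwargs.values()):
        yield dict(zip(keys, instance))

for params in product_dict(inplace=[False, True], channel_dim=[1, 2]):
    print(params)
# {'inplace': False, 'channel_dim': 1}
# {'inplace': False, 'channel_dim': 2}
# {'inplace': True, 'channel_dim': 1}
# {'inplace': True, 'channel_dim': 2}

Because every combination gets a distinctly keyed branch on the shared TensorCollector, reducers that differ only in attributes must not be merged, which is the behavior the collection run exercises.
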
6 changes: 4 additions & 2 deletions tests/experimental/common/test_reducers_and_aggregators.py
@@ -229,11 +229,12 @@ def test_mean_median_agggregators(self, aggregator_cls, refs, tensor_processor,
         "reducer_name",
         ["noop", "min", "max", "abs_max", "mean", "quantile", "abs_quantile", "batch_mean", "mean_per_ch"],
     )
-    def test_reducers_hash_equal(self, reducer_name, reducers):
+    def test_reducers_name_hash_equal(self, reducer_name, reducers):
         if reducer_name == "noop":
             reducers_instances = [reducers[reducer_name]() for _ in range(2)]
             assert hash(reducers_instances[0]) == hash(reducers_instances[1])
             assert reducers_instances[0] == reducers_instances[1]
+            assert reducers_instances[0].name == reducers_instances[1].name
             assert len(set(reducers_instances)) == 1
             return
 
@@ -266,7 +267,8 @@ def product_dict(**kwargs):
             reducers_instances.append(reducer_cls(**params_))
 
         assert len(set(reducers_instances)) == len(reducers_instances)
-        assert all(hash(reducers_instances[0]) != instns for instns in reducers_instances[1:])
+        assert len({hash(reducer) for reducer in reducers_instances}) == len(reducers_instances)
+        assert len({reducer.name for reducer in reducers_instances}) == len(reducers_instances)
 
         hashes = [hash(reducer) for reducer in reducers_instances]
         test_input = [self.get_nncf_tensor(np.empty((1, 3, 4, 4)))]
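
The renamed test above tightens the contract it checks: two reducers built from the same attributes must agree on equality, hash, and name, while any attribute difference must change all three. A minimal sketch of such a contract with a toy class (NNCF's real TensorReducerBase differs; this only illustrates the invariant under test):

class ToyReducer:
    # Hypothetical stand-in: equality, hash, and name all derive from the init parameters.
    def __init__(self, reduction_shape=None, inplace=False):
        self._reduction_shape = reduction_shape
        self._inplace = inplace

    @property
    def name(self) -> str:
        # Distinct attributes must produce distinct names so collected outputs
        # can be matched back to the reducer that requested them.
        return f"{type(self).__name__}_{self._reduction_shape}_{self._inplace}"

    def __eq__(self, other) -> bool:
        return (
            isinstance(other, type(self))
            and self._reduction_shape == other._reduction_shape
            and self._inplace == other._inplace
        )

    def __hash__(self) -> int:
        return hash((type(self).__name__, self._reduction_shape, self._inplace))

assert ToyReducer() == ToyReducer()
assert hash(ToyReducer()) == hash(ToyReducer()) and ToyReducer().name == ToyReducer().name
assert len({ToyReducer(), ToyReducer(inplace=True)}) == 2  # different attrs don't collapse
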
10 changes: 9 additions & 1 deletion tests/onnx/test_statistics_aggregator.py
@@ -9,13 +9,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Type
+from typing import List, Type
 
 import numpy as np
 import pytest
 
 from nncf import Dataset
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.onnx.graph.transformations.commands import ONNXTargetPoint
 from nncf.onnx.statistics.aggregator import ONNXStatisticsAggregator
 from nncf.quantization.algorithms.bias_correction.onnx_backend import ONNXBiasCorrectionAlgoBackend
@@ -75,6 +76,9 @@ def get_target_point(self, target_type: TargetType):
     def get_target_point_cls(self):
         return ONNXTargetPoint
 
+    def reducers_map(self) -> List[TensorReducerBase]:
+        return None
+
     @pytest.fixture
     def dataset_samples(self, dataset_values):
         input_shape = INPUT_SHAPE
@@ -98,6 +102,10 @@ def inplace_statistics(self, request) -> bool:
     def test_statistics_merging_simple(self, dataset_samples, inplace_statistics):
         pass
 
+    @pytest.mark.skip("Merging is not implemented yet")
+    def test_same_collectors_different_attrs_dont_merge(self, statistics_type, test_params, dataset_samples):
+        pass
+
     @pytest.mark.skip("Merging is not implemented yet")
     def test_statistic_merging(self, dataset_samples, inplace_statistics):
         pass
11 changes: 10 additions & 1 deletion tests/openvino/native/test_statistics_aggregator.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Type
+from typing import List, Type
 
 import numpy as np
 import openvino.runtime as ov
@@ -19,8 +19,12 @@
 from nncf import Dataset
 from nncf.common.graph.transformations.commands import TargetPoint
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.openvino.graph.transformations.commands import OVTargetPoint
 from nncf.openvino.statistics.aggregator import OVStatisticsAggregator
+from nncf.openvino.statistics.collectors import OV_REDUCERS_MAP
+from nncf.openvino.statistics.collectors import OVBatchMeanReducer
+from nncf.openvino.statistics.collectors import OVMeanPerChanelReducer
 from nncf.quantization.algorithms.bias_correction.openvino_backend import OVBiasCorrectionAlgoBackend
 from nncf.quantization.algorithms.fast_bias_correction.openvino_backend import OVFastBiasCorrectionAlgoBackend
 from nncf.quantization.algorithms.min_max.openvino_backend import OVMinMaxAlgoBackend
@@ -117,3 +121,8 @@ def _get_shared_conv_model(self, dataset_samples):
         sample = dataset_samples[0].reshape(INPUT_SHAPE[1:])
         conv_w = self.dataset_samples_to_conv_w(sample)
         return SharedConvModel(input_name=INPUT_NAME, input_shape=INPUT_SHAPE, kernel=conv_w).ov_model
+
+    def reducers_map(self) -> List[TensorReducerBase]:
+        map_ = OV_REDUCERS_MAP.copy()
+        map_.update({"batch_mean": OVBatchMeanReducer, "mean_per_ch": OVMeanPerChanelReducer})
+        return map_
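
Only the OpenVINO backend supplies a real mapping here; the ONNX and Torch overrides return None and skip the merge test. A hedged sketch of how the template test above is assumed to consume this map (the keys come from the test's parametrization; the constructor arguments mirror its params grid and are illustrative):

reducers = OV_REDUCERS_MAP.copy()
reducers.update({"batch_mean": OVBatchMeanReducer, "mean_per_ch": OVMeanPerChanelReducer})
# self.reducers_map()[statistics_type](**params_) then resolves to calls like:
min_reducer = reducers[StatisticsType.MIN](reduction_shape=(1, 2, 3), inplace=False)
per_ch_reducer = reducers["mean_per_ch"](inplace=True, channel_dim=1)
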
3 changes: 2 additions & 1 deletion tests/post_training/test_ptq_params.py
@@ -232,7 +232,8 @@ def test_model_type_pass(self, test_params, model_type):
         ],
     )
     def test_quantization_points_overflow_fix(self, overflow_fix, affected_target_points, ignored_ops):
-        # Checks the return value of _get_quantization_points_overflow_fix based on the overflow_fix and weight target points.
+        # Checks the return value of _get_quantization_points_overflow_fix
+        # based on the overflow_fix and weight target points.
         model = ModelToTestOverflowFix(self.metatypes_mapping)
         nncf_graph = model.nncf_graph
 
10 changes: 9 additions & 1 deletion tests/torch/test_statistics_aggregator.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Type
+from typing import List, Type
 
 import numpy as np
 import pytest
@@ -18,6 +18,7 @@
 
 from nncf import Dataset
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
 from nncf.torch.graph.graph import PTTargetPoint
 from nncf.torch.statistics.aggregator import PTStatisticsAggregator
@@ -86,6 +87,9 @@ def get_target_point(self, target_type: TargetType):
     def get_target_point_cls(self):
         return PTTargetPoint
 
+    def reducers_map(self) -> List[TensorReducerBase]:
+        return None
+
     @pytest.fixture
     def dataset_samples(self, dataset_values):
         input_shape = INPUT_SHAPE
@@ -113,6 +117,10 @@ def test_statistics_merging_simple(self, dataset_samples, inplace_statistics):
     def test_statistic_merging(self, dataset_samples, inplace_statistics):
         pass
 
+    @pytest.mark.skip("Merging is not implemented yet")
+    def test_same_collectors_different_attrs_dont_merge(self, statistics_type, test_params, dataset_samples):
+        pass
+
     @pytest.mark.skip("Bias correction and Fast bias correction is not implemented yet")
     def test_statistics_aggregator_bias_correction(
         self, dataset_samples, test_params, inplace_statistics, is_stat_in_shape_of_scale
