Skip to content

Commit

Permalink
- Fix mixed-precision (MP) config function name in tests
Browse files Browse the repository at this point in the history
- added tests for MP distance weighting with last layer
- fix mixed precision test name
  • Loading branch information
Ofir Gordon authored and Ofir Gordon committed Mar 10, 2024
1 parent 7e1508b commit faef3d4
Show file tree
Hide file tree
Showing 9 changed files with 47 additions and 23 deletions.
4 changes: 2 additions & 2 deletions tests/common_tests/base_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,13 @@ def get_input_shapes(self):

def get_core_config(self):
return CoreConfig(quantization_config=self.get_quantization_config(),
mixed_precision_config=self.get_mixed_precision_v2_config(),
mixed_precision_config=self.get_mixed_precision_config(),
debug_config=self.get_debug_config())

def get_quantization_config(self):
return QuantizationConfig()

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return None

def get_debug_config(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def get_tpc(self):
mp_bitwidth_candidates_list=self.mixed_precision_candidates_list,
name="mp_bopts_test")

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=1)

def get_input_shapes(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def get_quantization_config(self):
input_scaling=False,
activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1)

def get_input_shapes(self):
Expand Down Expand Up @@ -422,7 +422,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=False, weights_bias_correction=True,
input_scaling=False, activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=self.num_of_inputs)

def create_networks(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig()

def create_networks(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
import tensorflow as tf

from model_compression_toolkit.defaultdict import DefaultDict
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import get_last_layer_weights
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import get_last_layer_weights, \
MpDistanceWeighting
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS_ATTR, BIAS
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_keras_tpc
from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
Expand All @@ -37,14 +38,14 @@

class MixedPercisionBaseTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test, val_batch_size=1):
super().__init__(unit_test, val_batch_size=val_batch_size )
super().__init__(unit_test, val_batch_size=val_batch_size)

def get_quantization_config(self):
return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1)

def get_input_shapes(self):
Expand Down Expand Up @@ -80,7 +81,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig()

def get_kpi(self):
Expand All @@ -96,14 +97,20 @@ def compare(self, quantized_model, float_model, input_x=None, quantization_info=
self.unit_test.assertTrue(np.unique(conv_layers[1].weights[0]).flatten().shape[0] <= 8)


class MixedPercisionSearchTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
class MixedPrecisionSearchTest(MixedPercisionBaseTest):
def __init__(self, unit_test, distance_metric=MpDistanceWeighting.AVG):
super().__init__(unit_test, val_batch_size=2)

self.distance_metric = distance_metric

def get_kpi(self):
# kpi is infinity -> should give best model - 8bits
return KPI(np.inf)

def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
distance_weighting_method=self.distance_metric)

def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
conv_layers = get_layers_from_model_by_type(quantized_model, layers.Conv2D)
assert (quantization_info.mixed_precision_cfg == [0,
Expand Down Expand Up @@ -224,7 +231,7 @@ class MixedPercisionCombinedNMSTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
use_hessian_based_scores=False)

Expand Down Expand Up @@ -365,7 +372,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=False, weights_bias_correction=False,
input_scaling=False, activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig()


Expand All @@ -381,7 +388,7 @@ def get_quantization_config(self):
input_scaling=False,
activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1)

def get_tpc(self):
Expand Down Expand Up @@ -413,7 +420,7 @@ class MixedPercisionSearchLastLayerDistanceTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test, val_batch_size=2)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
distance_weighting_method=get_last_layer_weights,
use_hessian_based_scores=False)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,11 @@

import numpy as np
import tensorflow as tf
from sklearn.metrics.pairwise import distance_metrics
from tensorflow.keras.layers import PReLU, ELU

from model_compression_toolkit.core import QuantizationErrorMethod
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
from model_compression_toolkit.gptq import RoundingType
from tests.keras_tests.feature_networks_tests.feature_networks.activation_decomposition_test import \
Expand Down Expand Up @@ -122,7 +124,7 @@
from tests.keras_tests.feature_networks_tests.feature_networks.uniform_range_selection_activation_test import \
UniformRangeSelectionActivationTest, UniformRangeSelectionBoundedActivationTest
from tests.keras_tests.feature_networks_tests.feature_networks.weights_mixed_precision_tests import \
MixedPercisionSearchTest, MixedPercisionDepthwiseTest, \
MixedPrecisionSearchTest, MixedPercisionDepthwiseTest, \
MixedPercisionSearchKPI4BitsAvgTest, MixedPercisionSearchKPI2BitsAvgTest, MixedPrecisionActivationDisabled, \
MixedPercisionSearchLastLayerDistanceTest, MixedPercisionSearchActivationKPINonConfNodesTest, \
MixedPercisionSearchTotalKPINonConfNodesTest, MixedPercisionSearchPartWeightsLayersTest, MixedPercisionCombinedNMSTest
Expand Down Expand Up @@ -202,7 +204,8 @@ def test_mixed_precision_search_kpi_4bits_avg_nms(self):
MixedPercisionCombinedNMSTest(self).run_test()

def test_mixed_precision_search(self):
MixedPercisionSearchTest(self).run_test()
MixedPrecisionSearchTest(self, distance_metric=MpDistanceWeighting.AVG).run_test()
MixedPrecisionSearchTest(self, distance_metric=MpDistanceWeighting.LAST_LAYER).run_test()

def test_mixed_precision_for_part_weights_layers(self):
MixedPercisionSearchPartWeightsLayersTest(self).run_test()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ def __init__(self, unit_test):
def get_kpi(self):
return KPI(np.inf, np.inf)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=4)

def create_feature_network(self, input_shape):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@

from model_compression_toolkit.defaultdict import DefaultDict
from model_compression_toolkit.core import KPI
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import get_last_layer_weights
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import get_last_layer_weights, \
MpDistanceWeighting
from model_compression_toolkit.core.common.user_info import UserInformation
from model_compression_toolkit.core.pytorch.constants import BIAS
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR
Expand Down Expand Up @@ -79,12 +80,23 @@ def compare_results(self, quantization_info, quantized_models, float_model, expe


class MixedPercisionSearch8Bit(MixedPercisionBaseTest):
def __init__(self, unit_test):
def __init__(self, unit_test, distance_metric=MpDistanceWeighting.AVG):
super().__init__(unit_test)

self.distance_metric = distance_metric

def get_kpi(self):
return KPI(np.inf)

def get_core_configs(self):
qc = mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=False, weights_bias_correction=True,
input_scaling=False, activation_channel_equalization=False)
mpc = mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
distance_weighting_method=self.distance_metric)

return {"mixed_precision_model": mct.core.CoreConfig(quantization_config=qc, mixed_precision_config=mpc)}

def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
self.compare_results(quantization_info, quantized_models, float_model, 0)

Expand Down Expand Up @@ -217,7 +229,7 @@ def __init__(self, unit_test):
def get_kpi(self):
return KPI(192)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
use_hessian_based_scores=False,
distance_weighting_method=get_last_layer_weights)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import torch
from torch import nn
import model_compression_toolkit as mct
from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
from model_compression_toolkit.gptq.common.gptq_config import RoundingType
from tests.pytorch_tests.model_tests.feature_models.add_net_test import AddNetTest
from tests.pytorch_tests.model_tests.feature_models.bn_attributes_quantization_test import BNAttributesQuantization
Expand Down Expand Up @@ -394,7 +395,8 @@ def test_mixed_precision_8bit(self):
"""
This test checks the Mixed Precision search.
"""
MixedPercisionSearch8Bit(self).run_test()
MixedPercisionSearch8Bit(self, distance_metric=MpDistanceWeighting.AVG).run_test()
MixedPercisionSearch8Bit(self, distance_metric=MpDistanceWeighting.LAST_LAYER).run_test()

def test_mixed_precision_part_weights_layers(self):
"""
Expand Down

0 comments on commit faef3d4

Please sign in to comment.