Remove folder "target_platform"
liord committed Jan 13, 2025
1 parent 05e1eaa commit 2bc3e95
Showing 15 changed files with 36 additions and 27 deletions.
@@ -20,11 +20,11 @@
from model_compression_toolkit.constants import TENSORFLOW
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.verify_packages import FOUND_TF

if FOUND_TF:
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
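The change repeated across the first three files in this commit is that the Keras-specific AttachTpcToKeras import moves from module level into the existing "if FOUND_TF:" block. A minimal sketch of the resulting guarded-import pattern, using only imports that appear in the diff (the rest of each facade module is omitted):

from model_compression_toolkit.verify_packages import FOUND_TF

if FOUND_TF:
    # Keras-specific attachment is imported only when TensorFlow is installed,
    # so importing the facade module itself no longer requires TensorFlow.
    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
        AttachTpcToKeras
    from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO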
4 changes: 2 additions & 2 deletions model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -18,8 +18,6 @@
from model_compression_toolkit import get_target_platform_capabilities
from model_compression_toolkit.constants import TENSORFLOW
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.verify_packages import FOUND_TF
from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
from model_compression_toolkit.core.common.pruning.pruner import Pruner
@@ -32,6 +30,8 @@
from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL

if FOUND_TF:
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.core.keras.back2framework.float_model_builder import FloatKerasModelBuilder
from model_compression_toolkit.core.keras.pruning.pruning_keras_implementation import PruningKerasImplementation
from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
4 changes: 2 additions & 2 deletions model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -23,8 +23,6 @@
from model_compression_toolkit.logger import Logger
from model_compression_toolkit.constants import TENSORFLOW
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.verify_packages import FOUND_TF
from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -34,6 +32,8 @@
from model_compression_toolkit.metadata import create_model_metadata

if FOUND_TF:
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
AttachTpcToKeras
from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
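For context, a hedged sketch of how the PTQ facade presumably uses this import once TensorFlow is present. The attach(...) call is an assumption based on the AttachTpcToFramework base class, and the helper name is purely illustrative; neither is part of this diff:

if FOUND_TF:
    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
        AttachTpcToKeras

    def _resolve_keras_capabilities(target_platform_capabilities):
        # Assumption: attach() converts a framework-agnostic TargetPlatformCapabilities
        # object into Keras-aware capabilities used by the quantization flow.
        return AttachTpcToKeras().attach(target_platform_capabilities)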
@@ -31,6 +31,7 @@
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \
AttachTpcToFramework
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Eq


class AttachTpcToPytorch(AttachTpcToFramework):
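The newly imported Eq attribute filter composes with LayerFilterParams in the same way Greater(...) is used in the PyTorch TPC tests further down; a small hedged illustration (the layer and attribute values are made up for the example):

import torch

from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Eq

# Matches torch.nn.Softmax layers whose dim attribute equals 1 (illustrative values only).
softmax_dim_1 = LayerFilterParams(torch.nn.Softmax, Eq("dim", 1))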
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
IMX500_TP_MODEL
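The tpc.py test helpers in this commit (v1 above, v1_lut through v4 below) all receive the same one-line change: QuantizationMethod is now imported from mct_quantizers rather than referenced through the schema module. A brief hedged sketch of typical usage when building a TPC; the specific enum members shown are assumptions, not taken from this diff:

from mct_quantizers import QuantizationMethod

# Assumed-typical choices when constructing quantization configs for a TPC:
weights_quantization_method = QuantizationMethod.SYMMETRIC
activation_quantization_method = QuantizationMethod.POWER_OF_TWO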
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
IMX500_TP_MODEL
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
1 change: 1 addition & 0 deletions tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -16,6 +16,7 @@

import model_compression_toolkit as mct
import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.constants import FLOAT_BITWIDTH
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
IMX500_TP_MODEL
@@ -16,7 +16,7 @@
import tensorflow as tf

import model_compression_toolkit as mct
from mct_quantizers import KerasActivationQuantizationHolder
from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
from model_compression_toolkit.core.common.network_editors.actions import ChangeCandidatesActivationQuantConfigAttr, \
ChangeQuantizationParamFunction, EditRule, ChangeCandidatesWeightsQuantConfigAttr
from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter, NodeNameScopeFilter, \
@@ -17,7 +17,7 @@
import tensorflow as tf
import numpy as np

from mct_quantizers import KerasActivationQuantizationHolder
from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
@@ -18,7 +18,7 @@
import numpy as np


from mct_quantizers import KerasActivationQuantizationHolder
from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
37 changes: 19 additions & 18 deletions tests/pytorch_tests/function_tests/test_pytorch_tpc.py
@@ -31,7 +31,8 @@
from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams, \
OperationsSetToLayers
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \
Smaller, Eq
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
@@ -114,13 +115,13 @@ def test_qco_by_pytorch_layer(self):

tpc_pytorch = FrameworkQuantizationCapabilities(tpm)
with tpc_pytorch:
tp.OperationsSetToLayers("conv", [torch.nn.Conv2d],
OperationsSetToLayers("conv", [torch.nn.Conv2d],
attr_mapping={KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
BIAS_ATTR: DefaultDict(default_value=BIAS)})
tp.OperationsSetToLayers("tanh", [torch.tanh])
tp.OperationsSetToLayers("avg_pool2d_kernel_2",
OperationsSetToLayers("tanh", [torch.tanh])
OperationsSetToLayers("avg_pool2d_kernel_2",
[LayerFilterParams(torch.nn.functional.avg_pool2d, kernel_size=2)])
tp.OperationsSetToLayers("avg_pool2d",
OperationsSetToLayers("avg_pool2d",
[torch.nn.functional.avg_pool2d])

conv_node = get_node(torch.nn.Conv2d(3, 3, (1, 1)))
@@ -157,7 +158,7 @@ def test_get_layers_by_op(self):
fw_tp = FrameworkQuantizationCapabilities(hm)
with fw_tp:
opset_layers = [torch.nn.Conv2d, LayerFilterParams(torch.nn.Softmax, dim=1)]
tp.OperationsSetToLayers('opsetA', opset_layers)
OperationsSetToLayers('opsetA', opset_layers)
self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA'), opset_layers)
self.assertEqual(fw_tp.get_layers_by_opset(op_obj), opset_layers)

@@ -178,8 +179,8 @@ def test_get_layers_by_opconcat(self):
with fw_tp:
opset_layers_a = [torch.nn.Conv2d]
opset_layers_b = [LayerFilterParams(torch.nn.Softmax, dim=1)]
tp.OperationsSetToLayers('opsetA', opset_layers_a)
tp.OperationsSetToLayers('opsetB', opset_layers_b)
OperationsSetToLayers('opsetA', opset_layers_a)
OperationsSetToLayers('opsetB', opset_layers_b)

self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b)

@@ -197,8 +198,8 @@ def test_layer_attached_to_multiple_opsets(self):
fw_tp = FrameworkQuantizationCapabilities(hm)
with self.assertRaises(Exception) as e:
with fw_tp:
tp.OperationsSetToLayers('opsetA', [torch.nn.Conv2d])
tp.OperationsSetToLayers('opsetB', [torch.nn.Conv2d])
OperationsSetToLayers('opsetA', [torch.nn.Conv2d])
OperationsSetToLayers('opsetB', [torch.nn.Conv2d])
self.assertEqual('Found layer Conv2d in more than one OperatorsSet', str(e.exception))

def test_filter_layer_attached_to_multiple_opsets(self):
@@ -214,8 +215,8 @@ def test_filter_layer_attached_to_multiple_opsets(self):
fw_tp = FrameworkQuantizationCapabilities(hm)
with self.assertRaises(Exception) as e:
with fw_tp:
tp.OperationsSetToLayers('opsetA', [LayerFilterParams(torch.nn.Softmax, dim=2)])
tp.OperationsSetToLayers('opsetB', [LayerFilterParams(torch.nn.Softmax, dim=2)])
OperationsSetToLayers('opsetA', [LayerFilterParams(torch.nn.Softmax, dim=2)])
OperationsSetToLayers('opsetB', [LayerFilterParams(torch.nn.Softmax, dim=2)])
self.assertEqual('Found layer Softmax(dim=2) in more than one OperatorsSet', str(e.exception))

# TODO: need to test as part of attach to fw tests
Expand All @@ -227,10 +228,10 @@ def test_filter_layer_attached_to_multiple_opsets(self):
# tpc_platform_type=None,
# operator_set=tuple([schema.OperatorsSet(name="opA")]),
# add_metadata=False)
# hm_pytorch = tp.FrameworkQuantizationCapabilities(hm)
# hm_pytorch = FrameworkQuantizationCapabilities(hm)
# with self.assertRaises(Exception) as e:
# with hm_pytorch:
# tp.OperationsSetToLayers("conv", [torch.nn.Conv2d])
# OperationsSetToLayers("conv", [torch.nn.Conv2d])
# self.assertEqual(
# 'conv is not defined in the target platform model that is associated with the target platform capabilities.',
# str(e.exception))
@@ -252,11 +253,11 @@ def test_pytorch_fusing_patterns(self):
fusing_patterns=tuple(fusing_patterns),
add_metadata=False)

hm_keras = tp.FrameworkQuantizationCapabilities(hm)
hm_keras = FrameworkQuantizationCapabilities(hm)
with hm_keras:
tp.OperationsSetToLayers("opA", [torch.conv2d])
tp.OperationsSetToLayers("opB", [torch.tanh])
tp.OperationsSetToLayers("opC", [LayerFilterParams(torch.relu, Greater("max_value", 7), negative_slope=0)])
OperationsSetToLayers("opA", [torch.conv2d])
OperationsSetToLayers("opB", [torch.tanh])
OperationsSetToLayers("opC", [LayerFilterParams(torch.relu, Greater("max_value", 7), negative_slope=0)])

fusings = hm_keras.get_fusing_patterns()
self.assertEqual(len(fusings), 2)
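Most of the edits in test_pytorch_tpc.py swap the tp.-prefixed names for direct imports of OperationsSetToLayers and FrameworkQuantizationCapabilities. A condensed sketch of the updated pattern, assuming a schema target platform model tpm built elsewhere in the test (the opset names here are illustrative):

import torch

from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
    LayerFilterParams, OperationsSetToLayers
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
    FrameworkQuantizationCapabilities

# tpm is a schema TargetPlatformCapabilities instance constructed by the test (not shown here).
fqc = FrameworkQuantizationCapabilities(tpm)
with fqc:
    OperationsSetToLayers("conv", [torch.nn.Conv2d])
    OperationsSetToLayers("softmax_dim_1", [LayerFilterParams(torch.nn.Softmax, dim=1)])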
@@ -15,6 +15,7 @@
import torch
import numpy as np
import model_compression_toolkit as mct
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter
from model_compression_toolkit.core.common.network_editors.actions import EditRule, \
ChangeCandidatesWeightsQuantizationMethod
