Remove TPC IMX500v3 and rename TPC IMX500v4 into IMX500v3 (#1181)
lapid92 authored Aug 21, 2024
1 parent ed95fb2 commit 0b64802
Showing 16 changed files with 38 additions and 530 deletions.
@@ -42,8 +42,6 @@ def get_tpc_dict_by_fw(fw_name):
             get_keras_tpc as get_keras_tpc_v3
         from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_keras import \
             get_keras_tpc as get_keras_tpc_v3_lut
-        from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_keras import \
-            get_keras_tpc as get_keras_tpc_v4
 
         # Keras: TPC versioning
         tpc_models_dict = {'v1': get_keras_tpc_v1,
@@ -53,7 +51,6 @@ def get_tpc_dict_by_fw(fw_name):
                            'v2_lut': get_keras_tpc_v2_lut,
                            'v3': get_keras_tpc_v3,
                            'v3_lut': get_keras_tpc_v3_lut,
-                           'v4': get_keras_tpc_v4,
                            LATEST: get_keras_tpc_latest}
     elif fw_name == PYTORCH:
         ###############################
@@ -76,8 +73,6 @@ def get_tpc_dict_by_fw(fw_name):
             get_pytorch_tpc as get_pytorch_tpc_v3
         from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3_lut.tpc_pytorch import \
             get_pytorch_tpc as get_pytorch_tpc_v3_lut
-        from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tpc_pytorch import \
-            get_pytorch_tpc as get_pytorch_tpc_v4
 
         # Pytorch: TPC versioning
         tpc_models_dict = {'v1': get_pytorch_tpc_v1,
@@ -87,7 +82,6 @@ def get_tpc_dict_by_fw(fw_name):
                            'v2_lut': get_pytorch_tpc_v2_lut,
                            'v3': get_pytorch_tpc_v3,
                            'v3_lut': get_pytorch_tpc_v3_lut,
-                           'v4': get_pytorch_tpc_v4,
                            LATEST: get_pytorch_tpc_latest}
     if tpc_models_dict is not None:
         return tpc_models_dict
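Both framework branches drop the v4 import and its dict entry, so 'v4' no longer resolves as a TPC version string; the former v4 capabilities are now served under 'v3'. A minimal usage sketch, assuming MCT's public get_target_platform_capabilities entry point, which resolves version strings through the dicts above:

import model_compression_toolkit as mct

# 'v4' no longer resolves after this commit; the renamed TPC is fetched as 'v3'.
keras_tpc = mct.get_target_platform_capabilities('tensorflow', 'imx500',
                                                 target_platform_version='v3')
pytorch_tpc = mct.get_target_platform_capabilities('pytorch', 'imx500',
                                                   target_platform_version='v3')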
@@ -167,6 +167,16 @@ def generate_tp_model(default_config: OpQuantizationConfig,
             weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = tp.QuantizationConfigOptions([const_config])
 
+    # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
+    # support 16 bit as input and output.
+    const_config_input16 = const_config.clone_and_edit(
+        supported_input_activation_n_bits=(8, 16))
+    const_config_input16_output16 = const_config_input16.clone_and_edit(
+        activation_n_bits=16, signedness=Signedness.SIGNED)
+    const_configuration_options_inout16 = tp.QuantizationConfigOptions([const_config_input16_output16,
+                                                                        const_config_input16],
+                                                                       base_config=const_config_input16)
+
     # Create a TargetPlatformModel and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
@@ -186,8 +196,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     # May suit for operations like: Dropout, Reshape, etc.
     default_qco = tp.get_default_quantization_config_options()
     tp.OperatorsSet("NoQuantization",
-                    default_qco.clone_and_edit(enable_activation_quantization=False)
+                    default_qco.clone_and_edit(enable_activation_quantization=False,
+                                               supported_input_activation_n_bits=(8, 16))
                     .clone_and_edit_weight_attribute(enable_weights_quantization=False))
+    tp.OperatorsSet("Default16BitInout", const_configuration_options_inout16)
 
     # Create Mixed-Precision quantization configuration options from the given list of OpQuantizationConfig objects
     mixed_precision_configuration_options = tp.QuantizationConfigOptions(mixed_precision_cfg_list,
@@ -200,9 +212,9 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     # Define operations sets without quantization configuration
     # options (useful for creating fusing patterns, for example):
     any_relu = tp.OperatorsSet("AnyReLU")
-    add = tp.OperatorsSet("Add", const_configuration_options)
-    sub = tp.OperatorsSet("Sub", const_configuration_options)
-    mul = tp.OperatorsSet("Mul", const_configuration_options)
+    add = tp.OperatorsSet("Add", const_configuration_options_inout16)
+    sub = tp.OperatorsSet("Sub", const_configuration_options_inout16)
+    mul = tp.OperatorsSet("Mul", const_configuration_options_inout16)
     div = tp.OperatorsSet("Div", const_configuration_options)
     prelu = tp.OperatorsSet("PReLU")
     swish = tp.OperatorsSet("Swish")
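The new 16-bit options are derived from the 8-bit const config via clone_and_edit: one candidate only widens the supported inputs to (8, 16), a second also emits a signed 16-bit activation, and both are listed in a QuantizationConfigOptions with the input-only variant as base_config (the default used outside mixed precision). Below is a toy stand-in for that copy-with-overrides pattern, illustrative only, since MCT's real OpQuantizationConfig carries many more fields:

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class ToyOpConfig:
    activation_n_bits: int = 8
    supported_input_activation_n_bits: tuple = (8,)
    signedness: str = 'AUTO'

    def clone_and_edit(self, **overrides):
        # Copy the config, overriding only the named fields.
        return replace(self, **overrides)

base = ToyOpConfig()
input16 = base.clone_and_edit(supported_input_activation_n_bits=(8, 16))
inout16 = input16.clone_and_edit(activation_n_bits=16, signedness='SIGNED')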
@@ -26,11 +26,11 @@
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose, Identity
+        Conv2DTranspose, Identity, Concatenate
 else:
     from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
         MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
-        Conv2DTranspose, Identity
+        Conv2DTranspose, Identity, Concatenate
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v3.tp_model import get_tp_model
 import model_compression_toolkit as mct
@@ -84,6 +84,7 @@ def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
                      tf.compat.v1.gather,
                      tf.nn.top_k,
                      tf.__operators__.getitem,
+                     tf.strided_slice,
                      tf.image.combined_non_max_suppression,
                      tf.compat.v1.shape]
@@ -92,6 +93,8 @@ def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
 
     with keras_tpc:
         tp.OperationsSetToLayers("NoQuantization", no_quant_list)
+        tp.OperationsSetToLayers("Default16BitInout", [tf.stack,
+                                                       tf.concat, Concatenate])
         tp.OperationsSetToLayers("Conv",
                                  [Conv2D,
                                   DepthwiseConv2D,
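For context, a minimal Keras graph exercising the remapped layers: Concatenate, tf.concat, and tf.stack now route to Default16BitInout, while tf.strided_slice joins the NoQuantization list above. The model itself is illustrative, not part of the commit:

import tensorflow as tf

x1 = tf.keras.Input(shape=(16,))
x2 = tf.keras.Input(shape=(16,))
cat = tf.keras.layers.Concatenate()([x1, x2])   # maps to "Default16BitInout"
stacked = tf.stack([x1, x2], axis=1)            # maps to "Default16BitInout"
sliced = tf.strided_slice(cat, [0, 0], [1, 8])  # maps to "NoQuantization"
model = tf.keras.Model([x1, x2], [cat, stacked, sliced])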
@@ -17,7 +17,7 @@
 
 import torch
 from torch import add, sub, mul, div, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, chunk, unbind, topk, \
-    gather, equal, transpose, permute, argmax, squeeze
+    gather, equal, transpose, permute, argmax, squeeze, multiply, subtract
 from torch.nn import Conv2d, Linear, ConvTranspose2d, MaxPool2d
 from torch.nn import Dropout, Flatten, Hardtanh, Identity
 from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, LeakyReLU
@@ -85,6 +85,8 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                                                          topk,
                                                          squeeze,
                                                          MaxPool2d])
+        tp.OperationsSetToLayers("Default16BitInout",
+                                 [torch.stack, torch.cat, torch.concat, torch.concatenate])
 
         tp.OperationsSetToLayers("Conv", [Conv2d, ConvTranspose2d],
                                  attr_mapping=pytorch_linear_attr_mapping)
@@ -101,8 +103,8 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
                                                 tp.LayerFilterParams(hardtanh, min_val=0)])
 
         tp.OperationsSetToLayers("Add", [operator.add, add])
-        tp.OperationsSetToLayers("Sub", [operator.sub, sub])
-        tp.OperationsSetToLayers("Mul", [operator.mul, mul])
+        tp.OperationsSetToLayers("Sub", [operator.sub, sub, subtract])
+        tp.OperationsSetToLayers("Mul", [operator.mul, mul, multiply])
         tp.OperationsSetToLayers("Div", [operator.truediv, div])
         tp.OperationsSetToLayers("PReLU", [PReLU, prelu])
         tp.OperationsSetToLayers("Swish", [SiLU, silu, Hardswish, hardswish])
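The PyTorch mapping mirrors the Keras one, and additionally routes the torch.subtract and torch.multiply aliases to the existing Sub and Mul sets. A minimal module touching the newly mapped ops, again illustrative rather than from the commit:

import torch

class ConcatArith(torch.nn.Module):
    def forward(self, x, y):
        cat = torch.cat([x, y], dim=1)        # maps to "Default16BitInout"
        stacked = torch.stack([x, y], dim=1)  # maps to "Default16BitInout"
        diff = torch.subtract(x, y)           # alias now mapped to "Sub"
        prod = torch.multiply(x, y)           # alias now mapped to "Mul"
        return cat, stacked, diff, prod

# Example: ConcatArith()(torch.randn(1, 8), torch.randn(1, 8))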

The remaining changed files (the deleted v4 TPC sources) are not shown in this view.
