From 387c58ddb71244d6aa23d01580314b6cbd3b1ca1 Mon Sep 17 00:00:00 2001 From: reuvenp Date: Wed, 15 Jan 2025 11:28:02 +0200 Subject: [PATCH] fix imports in tutorials --- .../keras/example_keras_pruning_mnist.ipynb | 17 ++++++----- .../keras/example_keras_qat.ipynb | 28 ++++++++----------- 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb index 8bcde15da..8ed23bc1a 100644 --- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb +++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb @@ -218,19 +218,18 @@ "cell_type": "code", "source": [ "from mct_quantizers import QuantizationMethod\n", - "from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import schema, TargetPlatformCapabilities, Signedness, \\\n", - " AttributeQuantizationConfig, OpQuantizationConfig\n", + "from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import schema\n", "\n", "simd_size = 1\n", "\n", "def get_tpc():\n", " # Define the default weight attribute configuration\n", - " default_weight_attr_config = AttributeQuantizationConfig(\n", + " default_weight_attr_config = schema.AttributeQuantizationConfig(\n", " weights_quantization_method=QuantizationMethod.UNIFORM,\n", " )\n", "\n", " # Define the OpQuantizationConfig\n", - " default_config = OpQuantizationConfig(\n", + " default_config = schema.OpQuantizationConfig(\n", " default_weight_attr_config=default_weight_attr_config,\n", " attr_weights_configs_mapping={},\n", " activation_quantization_method=QuantizationMethod.UNIFORM,\n", @@ -249,11 +248,11 @@ "\n", " # Create the quantization configuration options and model\n", " default_configuration_options = 
schema.QuantizationConfigOptions(quantization_configurations=tuple([default_config]))\n", - " tpc = TargetPlatformCapabilities(default_qco=default_configuration_options,\n", - " tpc_minor_version=1,\n", - " tpc_patch_version=0,\n", - " tpc_platform_type=\"custom_pruning_notebook_tpc\",\n", - " operator_set=tuple(operator_set))\n", + " tpc = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,\n", + " tpc_minor_version=1,\n", + " tpc_patch_version=0,\n", + " tpc_platform_type=\"custom_pruning_notebook_tpc\",\n", + " operator_set=tuple(operator_set))\n", " return tpc\n" ], "metadata": { diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb index ef8614222..746e6b282 100644 --- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb +++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb @@ -189,13 +189,10 @@ }, "outputs": [], "source": [ - "from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness, AttributeQuantizationConfig\n", - "from model_compression_toolkit import DefaultDict\n", "from model_compression_toolkit.constants import FLOAT_BITWIDTH\n", - "from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS_ATTR, BIAS\n", "from mct_quantizers import QuantizationMethod\n", - "from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import schema, TargetPlatformCapabilities, Signedness, \\\n", - " AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions\n", + "from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR\n", + "from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import schema\n", "\n", "def get_tpc():\n", " \"\"\"\n", @@ -210,7 +206,7 @@ " \"\"\"\n", "\n", " # 
define a default quantization config for all non-specified weights attributes.\n", - " default_weight_attr_config = AttributeQuantizationConfig(\n", + " default_weight_attr_config = schema.AttributeQuantizationConfig(\n", " weights_quantization_method=QuantizationMethod.POWER_OF_TWO,\n", " weights_n_bits=8,\n", " weights_per_channel_threshold=False,\n", @@ -218,7 +214,7 @@ " lut_values_bitwidth=None)\n", "\n", " # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).\n", - " kernel_base_config = AttributeQuantizationConfig(\n", + " kernel_base_config = schema.AttributeQuantizationConfig(\n", " weights_quantization_method=QuantizationMethod.SYMMETRIC,\n", " weights_n_bits=2,\n", " weights_per_channel_threshold=True,\n", @@ -226,7 +222,7 @@ " lut_values_bitwidth=None)\n", "\n", " # define a quantization config to quantize the bias (for layers where there is a bias attribute).\n", - " bias_config = AttributeQuantizationConfig(\n", + " bias_config = schema.AttributeQuantizationConfig(\n", " weights_quantization_method=QuantizationMethod.POWER_OF_TWO,\n", " weights_n_bits=FLOAT_BITWIDTH,\n", " weights_per_channel_threshold=False,\n", @@ -237,7 +233,7 @@ " # AttributeQuantizationConfig for weights with no specific AttributeQuantizationConfig.\n", " # MCT will compress a layer's kernel and bias according to the configurations that are\n", " # set in KERNEL_ATTR and BIAS_ATTR that are passed in attr_weights_configs_mapping.\n", - " default_config = OpQuantizationConfig(\n", + " default_config = schema.OpQuantizationConfig(\n", " default_weight_attr_config=default_weight_attr_config,\n", " attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config,\n", " BIAS_ATTR: bias_config},\n", @@ -253,7 +249,7 @@ "\n", " # Set default QuantizationConfigOptions in new TargetPlatformCapabilities to be used when no other\n", " # QuantizationConfigOptions is set for an OperatorsSet.\n", - " default_configuration_options = 
QuantizationConfigOptions(quantization_configurations=[default_config])\n", + " default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=[default_config])\n", " no_quantization_config = (default_configuration_options.clone_and_edit(enable_activation_quantization=False)\n", " .clone_and_edit_weight_attribute(enable_weights_quantization=False))\n", "\n", @@ -263,11 +259,11 @@ " operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.FLATTEN, qc_options=no_quantization_config))\n", "\n", "\n", - " tpc = TargetPlatformCapabilities(default_qco=default_configuration_options,\n", - " tpc_minor_version=1,\n", - " tpc_patch_version=0,\n", - " tpc_platform_type=\"custom_qat_notebook_tpc\",\n", - " operator_set=tuple(operator_set))\n", + " tpc = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,\n", + " tpc_minor_version=1,\n", + " tpc_patch_version=0,\n", + " tpc_platform_type=\"custom_qat_notebook_tpc\",\n", + " operator_set=tuple(operator_set))\n", " return tpc\n" ] },