From 7314f53c4226bfc86090e0c8dca3973a3cda9812 Mon Sep 17 00:00:00 2001
From: Martin Hickey
Date: Wed, 22 Jan 2025 14:18:40 +0000
Subject: [PATCH 01/13] Add github action to run mypy

Signed-off-by: Martin Hickey
---
 .github/workflows/lint.yml           |  4 ++++
 .github/workflows/matchers/mypy.json | 16 ++++++++++++++++
 2 files changed, 20 insertions(+)
 create mode 100644 .github/workflows/matchers/mypy.json

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 59678d5a..6c6a43cb 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -47,6 +47,10 @@ jobs:
           commands: |
             echo "::add-matcher::.github/workflows/matchers/pylint.json"
             tox -e lint
+        - name: "mypy"
+          commands: |
+            echo "::add-matcher::.github/workflows/matchers/mypy.json"
+            tox -e mypy
 
     steps:
       - name: "Harden Runner"
diff --git a/.github/workflows/matchers/mypy.json b/.github/workflows/matchers/mypy.json
new file mode 100644
index 00000000..f048fce5
--- /dev/null
+++ b/.github/workflows/matchers/mypy.json
@@ -0,0 +1,16 @@
+{
+  "problemMatcher": [
+    {
+      "owner": "mypy",
+      "pattern": [
+        {
+          "regexp": "^(.+):(\\d+):\\s(error|warning):\\s(.+)$",
+          "file": 1,
+          "line": 2,
+          "severity": 3,
+          "message": 4
+        }
+      ]
+    }
+  ]
+}
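A problem matcher turns matching log lines into file/line annotations on the PR. As a quick local sanity check of the regexp registered above — a minimal sketch, not part of the patch series; the sample error line is borrowed from patch 02's commit message:

```python
import re

# Same pattern that .github/workflows/matchers/mypy.json registers above.
MYPY_LINE = re.compile(r"^(.+):(\d+):\s(error|warning):\s(.+)$")

# A line in mypy's default output format.
sample = (
    "fms_mo/quant/quantizers.py:4161: error: "
    "Unsupported target for indexed assignment"
)

m = MYPY_LINE.match(sample)
assert m is not None
file, line, severity, message = m.groups()
print(file, line, severity, message, sep=" | ")
# fms_mo/quant/quantizers.py | 4161 | error | Unsupported target for indexed assignment
```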
From 7f748af18006eb1a05dcdf33feb6dc85b00fb957 Mon Sep 17 00:00:00 2001
From: Martin Hickey
Date: Wed, 22 Jan 2025 17:45:58 +0000
Subject: [PATCH 02/13] Need to specify types for collection assignment

Fixes:

fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:66: error: Unsupported target for indexed assignment ("Mapping[str, Any]") [index]
fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:67: error: Unsupported target for indexed assignment ("Mapping[str, Any]") [index]
fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:75: error: Unsupported target for indexed assignment ("Mapping[str, Any]") [index]
fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:82: error: Unsupported target for indexed assignment ("Mapping[str, Any]") [index]
fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:87: error: Unsupported target for indexed assignment ("Mapping[str, Any]") [index]
fms_mo/quant/quantizers.py:4161: error: Incompatible types in assignment (expression has type "list[int]", variable has type "None") [assignment]
fms_mo/quant/quantizers.py:4161: error: Argument 1 to "len" has incompatible type "None"; expected "Sized" [arg-type]
fms_mo/quant/quantizers.py:4162: error: Unsupported target for indexed assignment ("None") [index]
fms_mo/quant/quantizers.py:4162: error: Value of type "None" is not indexable [index]

Signed-off-by: Martin Hickey
---
 fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py | 4 ++--
 fms_mo/quant/quantizers.py                 | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py b/fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py
index 7dbb9377..99a5225c 100644
--- a/fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py
+++ b/fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py
@@ -14,7 +14,7 @@
 """Implement FMS adapter for INT8xINT8 checkpoints"""
 
 # Standard
-from typing import Mapping
+from typing import Mapping, MutableMapping
 
 # Third Party
 from fms.utils import serialization
@@ -46,7 +46,7 @@ def _int8_qparams_aiu(
 
 
 def _add_defaults_and_concat(
-    new_sd: Mapping[str, torch.Tensor],
+    new_sd: MutableMapping[str, torch.Tensor],
     modules_seen: set,
 ) -> None:
     """
diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index c97dbfa8..bc21453f 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -4035,8 +4035,8 @@ def __init__(
         self.reset_ReSig_param(multimodal)
         self.beta = 2 / 3
-        self.Wshape = None
-        self.reshape2 = None
+        self.Wshape: list[Any] = list()
+        self.reshape2: list[Any] = list()
 
     def forward(self, x):
         if self.useSAWB:
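The Mapping → MutableMapping change is the crux: typing.Mapping is a read-only protocol (it declares __getitem__ but not __setitem__), so mypy rejects indexed assignment through it. A minimal illustration with hypothetical function names — note the error is static-only, both variants run fine on a plain dict:

```python
from typing import Any, Mapping, MutableMapping

def update_ro(sd: Mapping[str, Any]) -> None:
    sd["bias"] = 0  # mypy: Unsupported target for indexed assignment  [index]

def update_rw(sd: MutableMapping[str, Any]) -> None:
    sd["bias"] = 0  # OK: MutableMapping declares __setitem__
```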
From 7c0fc69a3d26d902fb123be1c097f9457da69636 Mon Sep 17 00:00:00 2001
From: Martin Hickey
Date: Thu, 23 Jan 2025 16:35:04 +0000
Subject: [PATCH 03/13] Fix incompatible types in assignment

Fix:

fms_mo/quant/quantizers.py:3933: error: Incompatible types in assignment (expression has type "int | float", variable has type "Tensor") [assignment]
fms_mo/quant/quantizers.py:3945: error: Incompatible types in assignment (expression has type "float", variable has type "Tensor | None") [assignment]
fms_mo/quant/quantizers.py:3947: error: Incompatible types in assignment (expression has type "Any | int", variable has type "Tensor | None") [assignment]
fms_mo/quant/quantizers.py:4190: error: Incompatible types in assignment (expression has type "Parameter", variable has type "None") [assignment]
fms_mo/quant/quantizers.py:4196: error: Incompatible types in assignment (expression has type "Size", variable has type "list[Any]") [assignment]
fms_mo/modules/linear.py:1341: error: Incompatible types in assignment (expression has type "tuple[int, int]", variable has type "tuple[int, int, int]") [assignment]
fms_mo/utils/torchscript_utils.py:58: error: Incompatible types in assignment (expression has type "list[str] | None", variable has type "list[str]") [assignment]
fms_mo/utils/qconfig_utils.py:167: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:168: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:169: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:170: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:171: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:172: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:173: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:174: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:175: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:176: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:178: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:179: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:180: error: Incompatible types in assignment (expression has type "tuple[float, float]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:183: error: Incompatible types in assignment (expression has type "list[Never]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:184: error: Incompatible types in assignment (expression has type "list[Never]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:188: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:189: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:190: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:191: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:192: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:193: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:194: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:195: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:196: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:197: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:199: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:203: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:204: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:205: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:206: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:207: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:208: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:210: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:213: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:214: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:215: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:216: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:217: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:218: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:219: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:220: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:221: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:222: error: Incompatible types in assignment (expression has type "float", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:223: error: Incompatible types in assignment (expression has type "float", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:224: error: Incompatible types in assignment (expression has type "float", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:226: error: Incompatible types in assignment (expression has type "list[Never]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:227: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:228: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:229: error: Incompatible types in assignment (expression has type "str", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:230: error: Incompatible types in assignment (expression has type "list[Never]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:237: error: Incompatible types in assignment (expression has type "None", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:238: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:239: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:240: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:243: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:244: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:245: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:246: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:247: error: Incompatible types in assignment (expression has type "list[Never]", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:248: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:249: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:250: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:251: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:252: error: Incompatible types in assignment (expression has type "bool", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:253: error: Incompatible types in assignment (expression has type "int", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/utils/qconfig_utils.py:257: error: Incompatible types in assignment (expression has type "float", target has type "dict[type, dict[str, type]]") [assignment]
fms_mo/quant/ptq.py:2646: error: Incompatible types in assignment (expression has type "tuple[type[Conv2d], type[Linear]]", variable has type "tuple[type[Conv2d], type[Linear], type[DetQConv2d]]") [assignment]

Signed-off-by: Martin Hickey
---
 fms_mo/quant/ptq.py               | 2 +-
 fms_mo/quant/quantizers.py        | 2 +-
 fms_mo/utils/qconfig_utils.py     | 4 ++--
 fms_mo/utils/torchscript_utils.py | 3 ++-
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/fms_mo/quant/ptq.py b/fms_mo/quant/ptq.py
index db79b364..c3e40623 100644
--- a/fms_mo/quant/ptq.py
+++ b/fms_mo/quant/ptq.py
@@ -2643,7 +2643,7 @@ def reset_bn(module: nn.BatchNorm2d):
         bn_affine = True  # FrozenBN doesn't have .affine property
     except:
         BNofInteret = (nn.BatchNorm2d, nn.BatchNorm1d)
-    AbsorbLayers = (nn.Conv2d, nn.Linear)
+    AbsorbLayers = (nn.Conv2d, nn.Linear)  # type: ignore[assignment]
 
 
 def search_fold_and_remove_bn(model, mod_folded):
diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index bc21453f..f05f5698 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -4035,7 +4035,7 @@ def __init__(
         self.reset_ReSig_param(multimodal)
         self.beta = 2 / 3
-        self.Wshape: list[Any] = list()
+        self.Wshape: list[int] = list()
         self.reshape2: list[Any] = list()
 
     def forward(self, x):
diff --git a/fms_mo/utils/qconfig_utils.py b/fms_mo/utils/qconfig_utils.py
index 4f1e297d..cd43bce5 100644
--- a/fms_mo/utils/qconfig_utils.py
+++ b/fms_mo/utils/qconfig_utils.py
@@ -15,7 +15,7 @@
 
 # Standard
 from pathlib import Path
-from typing import Any
+from typing import Any, Dict
 import json
 import logging
 import os
@@ -149,7 +149,7 @@ def qconfig_init(recipe: str = None, args: Any = None):
             otherwise use constantLR as default
     """
-    qcfg = {}
+    qcfg: Dict[str, Any] = {}
 
     # 1. create a dict with default values
     qcfg["mapping"] = {
         nn.Conv2d: {"from": nn.Conv2d, "to": QConv2d, "otherwise": QConv2d},
diff --git a/fms_mo/utils/torchscript_utils.py b/fms_mo/utils/torchscript_utils.py
index 39025b8d..3d3985fc 100644
--- a/fms_mo/utils/torchscript_utils.py
+++ b/fms_mo/utils/torchscript_utils.py
@@ -55,7 +55,8 @@ def parse_operation(op_str: str):
     operands = op_str[
         last_open_parenthesis_index + 1 : last_close_parenthesis_index
     ].split(",")
-    operands = [operand.strip() for operand in operands] if operands != [""] else None
+    # pylint: disable=line-too-long
+    operands = [operand.strip() for operand in operands] if operands != [""] else None  # type: ignore[assignment]
 
     return operator, operands
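The wall of qconfig_utils.py errors all trace to one inference rule: mypy types an unannotated empty dict from the first key/value assigned into it. A minimal sketch of the failure mode and of the `Dict[str, Any]` fix applied above (names here are illustrative, not from the repo):

```python
from typing import Any, Dict

cfg = {}  # mypy infers the value type from the first insertion below,
cfg["mapping"] = {int: {"to": str}}  # i.e. cfg: dict[str, dict[type, dict[str, type]]]
cfg["nbits_a"] = 32  # error: Incompatible types in assignment  [assignment]

# Annotating up front declares that heterogeneous values are intentional:
qcfg: Dict[str, Any] = {}
qcfg["mapping"] = {int: {"to": str}}
qcfg["nbits_a"] = 32  # OK
```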
From d737c0e626e649cdbdd70bfea14978246a2f70ce Mon Sep 17 00:00:00 2001
From: Martin Hickey
Date: Fri, 24 Jan 2025 17:14:36 +0000
Subject: [PATCH 04/13] Fix has no attribute

Fix:

fms_mo/quant/quantizers.py:3898: error: Item "None" of "Parameter | None" has no attribute "fill_" [union-attr]
fms_mo/quant/quantizers.py:3963: error: Item "float" of "Any | float" has no attribute "round" [union-attr]
fms_mo/quant/quantizers.py:5392: error: Module has no attribute "float8_e5m2G" [attr-defined]
fms_mo/custom_ext_kernels/utils.py:77: error: Module has no attribute "custom_op" [attr-defined]
fms_mo/custom_ext_kernels/utils.py:79: error: Module has no attribute "register_kernel" [attr-defined]
fms_mo/custom_ext_kernels/utils.py:80: error: Module has no attribute "register_fake" [attr-defined]
fms_mo/utils/torchscript_utils.py:182: error: Item "None" of "Node | None" has no attribute "inputs" [union-attr]
fms_mo/utils/torchscript_utils.py:184: error: Item "None" of "Node | None" has no attribute "outputs" [union-attr]
fms_mo/utils/torchscript_utils.py:204: error: "Node" has no attribute "name" [attr-defined]
fms_mo/utils/torchscript_utils.py:204: error: "Node" has no attribute "obj" [attr-defined]
fms_mo/utils/torchscript_utils.py:205: error: "Node" has no attribute "name" [attr-defined]
fms_mo/utils/torchscript_utils.py:207: error: "Node" has no attribute "name" [attr-defined]
fms_mo/utils/torchscript_utils.py:209: error: "Node" has no attribute "name" [attr-defined]
fms_mo/utils/torchscript_utils.py:212: error: "Node" has no attribute "Op" [attr-defined]
fms_mo/utils/torchscript_utils.py:214: error: "Node" has no attribute "unpackIdx" [attr-defined]
fms_mo/utils/torchscript_utils.py:216: error: "Node" has no attribute "lineno" [attr-defined]
fms_mo/utils/torchscript_utils.py:217: error: "Node" has no attribute "operator" [attr-defined]
fms_mo/utils/torchscript_utils.py:219: error: "Node" has no attribute "parents" [attr-defined]
fms_mo/utils/torchscript_utils.py:220: error: "Node" has no attribute "parents_ptr" [attr-defined]
fms_mo/utils/torchscript_utils.py:221: error: "Node" has no attribute "scope" [attr-defined]
fms_mo/utils/torchscript_utils.py:222: error: "Node" has no attribute "modname" [attr-defined]
fms_mo/utils/torchscript_utils.py:223: error: "Node" has no attribute "children" [attr-defined]
fms_mo/utils/torchscript_utils.py:224: error: "Node" has no attribute "children_ptr" [attr-defined]
fms_mo/utils/torchscript_utils.py:225: error: "Node" has no attribute "TSparents" [attr-defined]
fms_mo/utils/torchscript_utils.py:226: error: "Node" has no attribute "TSoutputs" [attr-defined]
fms_mo/utils/torchscript_utils.py:228: error: "Node" has no attribute "name" [attr-defined]
fms_mo/calib.py:485: error: Item "None" of "FrameType | None" has no attribute "f_code" [union-attr]
fms_mo/quant/ptq.py:2626: error: Item "None" of "Tensor | None" has no attribute "zero_" [union-attr]
fms_mo/quant/ptq.py:2627: error: Item "None" of "Tensor | None" has no attribute "fill_" [union-attr]

Signed-off-by: Martin Hickey
---
 fms_mo/calib.py                    |  3 +-
 fms_mo/custom_ext_kernels/utils.py |  6 ++--
 fms_mo/quant/ptq.py                |  6 ++--
 fms_mo/quant/quantizers.py         |  7 +++--
 fms_mo/utils/torchscript_utils.py  | 48 +++++++++++++++++-------------
 5 files changed, 41 insertions(+), 29 deletions(-)

diff --git a/fms_mo/calib.py b/fms_mo/calib.py
index cff4c67a..9a802894 100644
--- a/fms_mo/calib.py
+++ b/fms_mo/calib.py
@@ -482,7 +482,8 @@ def qmodel_calib(
         return model
 
     DPorDDPdevices = None
-    if "qmodel_prep" not in sys._getframe().f_back.f_code.co_name:
+    f_back = sys._getframe().f_back
+    if f_back and "qmodel_prep" not in f_back.f_code.co_name:
         model.to(currDev)
     qcfg["wasDPmodel"] = qcfg.get("wasDPmodel", isinstance(model, nn.DataParallel))
     qcfg["wasDDPmodel"] = qcfg.get(
diff --git a/fms_mo/custom_ext_kernels/utils.py b/fms_mo/custom_ext_kernels/utils.py
index b3c60c4d..21bb52a2 100644
--- a/fms_mo/custom_ext_kernels/utils.py
+++ b/fms_mo/custom_ext_kernels/utils.py
@@ -74,10 +74,10 @@
     # Third Party
     import torch.library as lib
 
-    reg_op = partial(lib.custom_op, mutates_args=())
+    reg_op = partial(lib.custom_op, mutates_args=())  # type: ignore[attr-defined]
     reg_op_func = lib.define  # NOTE this is func, not decorator
-    kernel_impl = lib.register_kernel
-    reg_fake = lib.register_fake
+    kernel_impl = lib.register_kernel  # type: ignore[attr-defined]
+    reg_fake = lib.register_fake  # type: ignore[attr-defined]
 else:
     raise RuntimeError("Custom Op registration only works for >PT2.1")
diff --git a/fms_mo/quant/ptq.py b/fms_mo/quant/ptq.py
index c3e40623..4223ad1d 100644
--- a/fms_mo/quant/ptq.py
+++ b/fms_mo/quant/ptq.py
@@ -2623,8 +2623,10 @@ def reset_bn(module: nn.BatchNorm2d):
     Function not currently used.
     """
     if module.track_running_stats:
-        module.running_mean.zero_()
-        module.running_var.fill_(1 - module.eps)
+        if running_mean := module.running_mean:
+            running_mean.zero_()
+        if running_var := module.running_var:
+            running_var.fill_(1 - module.eps)
         # we do not reset numer of tracked batches here
         if module.affine:
             nn.init.ones_(module.weight)
diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index f05f5698..afd742be 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3895,7 +3895,8 @@ def forward(self, x: torch.Tensor):
                 self.delta = torch.nn.Parameter(delta)
             else:
                 delta, zero_point = self.init_quantization_scale(x, self.channel_wise)
-                self.delta.fill_(delta)
+                if self_data := self.delta:
+                    self_data.fill_(delta)
                 self.zero_point.fill_(zero_point)
             self.inited = True
 
@@ -3960,7 +3961,7 @@ def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
                 if score < best_score:
                     best_score = score
                     delta = (new_max - new_min) / (2**self.n_bits - 1)
-                    zero_point = (-new_min / delta).round()
+                    zero_point = (-new_min / delta).round()  # type: ignore[union-attr]
         else:
             raise NotImplementedError
 
@@ -5389,7 +5390,7 @@ def __init__(
         if "e4m3" in q_mode:
             self.float8_dtype = torch.float8_e4m3fn
         elif "e5m2" in q_mode:
-            self.float8_dtype = torch.float8_e5m2G
+            self.float8_dtype = torch.float8_e5m2
         else:
             raise ValueError("FP8 only supports e4m3 and e5m2")
         self.emulate = emulate
diff --git a/fms_mo/utils/torchscript_utils.py b/fms_mo/utils/torchscript_utils.py
index 3d3985fc..0be4083c 100644
--- a/fms_mo/utils/torchscript_utils.py
+++ b/fms_mo/utils/torchscript_utils.py
@@ -179,9 +179,14 @@ def __init__(self, node_input, dictionary_of_nodes: dict):
                 )
                 operator, operands = parse_operation(op_str)
                 if "aten::_conv" in op_str:
-                    self.ch_in = list(native_torchscript_node.inputs())[0].type().sizes()
-                    # NOTE: Needed for finding shortcut convolutions later
-                    self.ch_out = list(native_torchscript_node.outputs())[0].type().sizes()
+                    if native_torchscript_node:
+                        self.ch_in = (
+                            list(native_torchscript_node.inputs())[0].type().sizes()
+                        )
+                        # NOTE: Needed for finding shortcut convolutions later
+                        self.ch_out = (
+                            list(native_torchscript_node.outputs())[0].type().sizes()
+                        )
             else:
                 node_def = node_input_repr
                 op_str, operator, operands = None, None, None
@@ -201,31 +206,34 @@ def __init__(self, node_input, dictionary_of_nodes: dict):
             working_str = node_input_repr[start_index:end_index]
             start_index = end_index + 2
 
-            node_instance.name, node_instance.obj = working_str.split(" : ")
-            node_instance.name = node_instance.name.strip()
+            # pylint: disable=line-too-long
+            node_instance.name, node_instance.obj = working_str.split(" : ")  # type: ignore[attr-defined]
+            node_instance.name = node_instance.name.strip()  # type: ignore[attr-defined]
             if native_torchscript_outputs:
-                if node_instance.name not in native_torchscript_outputs:
+                # pylint: disable=line-too-long
+                if node_instance.name not in native_torchscript_outputs:  # type: ignore[attr-defined]
+                    # pylint: disable=line-too-long
                     logger.error(
-                        f"Node def {node_instance.name} not in nativeTSoutputs "
+                        f"Node def {node_instance.name} not in nativeTSoutputs "  # type: ignore[attr-defined]
                         f"{native_torchscript_outputs}"
                     )
-            node_instance.Op = op_str
+            node_instance.Op = op_str  # type: ignore[attr-defined]
             if node_def_in_one_line > 1:
-                node_instance.unpackIdx = node_index
+                node_instance.unpackIdx = node_index  # type: ignore[attr-defined]
             if line_number:
-                node_instance.lineno = line_number
-            node_instance.operator = operator
+                node_instance.lineno = line_number  # type: ignore[attr-defined]
+            node_instance.operator = operator  # type: ignore[attr-defined]
             # This is the name of parents, not the pointer to the parent nodes
-            node_instance.parents = operands
-            node_instance.parents_ptr = []
-            node_instance.scope = scope_repr
-            node_instance.modname = module_name
-            node_instance.children = []
-            node_instance.children_ptr = []
-            node_instance.TSparents = native_torchscript_parents
-            node_instance.TSoutputs = native_torchscript_outputs
+            node_instance.parents = operands  # type: ignore[attr-defined]
+            node_instance.parents_ptr = []  # type: ignore[attr-defined]
+            node_instance.scope = scope_repr  # type: ignore[attr-defined]
+            node_instance.modname = module_name  # type: ignore[attr-defined]
+            node_instance.children = []  # type: ignore[attr-defined]
+            node_instance.children_ptr = []  # type: ignore[attr-defined]
+            node_instance.TSparents = native_torchscript_parents  # type: ignore[attr-defined]
+            node_instance.TSoutputs = native_torchscript_outputs  # type: ignore[attr-defined]
             # graph.dictionary_of_nodes will keep a record of all the nodes
-            dictionary_of_nodes[node_instance.name] = node_instance
+            dictionary_of_nodes[node_instance.name] = node_instance  # type: ignore[attr-defined]
 
     def __repr__(self):
         return f"{self.name} "
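Most of the union-attr fixes above share one idea: narrow the Optional before touching the attribute. A minimal sketch of the explicit-None-check variant, loosely mirroring reset_bn — a hypothetical helper, not part of the patch:

```python
import torch.nn as nn

def reset_running_stats(module: nn.BatchNorm2d) -> None:
    # running_mean / running_var are Optional[Tensor]; an explicit None
    # check narrows the union, so mypy accepts the in-place calls. For
    # tensors, `is not None` is also safer than walrus truthiness, since
    # bool() on a multi-element tensor raises at runtime.
    if module.running_mean is not None:
        module.running_mean.zero_()
    if module.running_var is not None:
        module.running_var.fill_(1.0 - module.eps)
```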
From d05c6c4294163d9d5ef1d168b9a062a0f7826ff8 Mon Sep 17 00:00:00 2001
From: Martin Hickey
Date: Fri, 24 Jan 2025 17:30:25 +0000
Subject: [PATCH 05/13] Disable import errors

Disabled:

fms_mo/utils/custom_gptq_models.py:18: error: Cannot find implementation or library stub for module named "auto_gptq.modeling" [import-not-found]
fms_mo/utils/utils.py:32: error: Skipping analyzing "transformers.tokenization_utils_base": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/utils/utils.py:328: error: Library stubs not installed for "pandas" [import-untyped]
fms_mo/utils/import_utils.py:20: error: Skipping analyzing "transformers.utils.import_utils": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/utils/calib_data.py:29: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/aiu_addons/i8i8/i8i8_aiu_adapter.py:20: error: Cannot find implementation or library stub for module named "fms.utils" [import-not-found]
fms_mo/aiu_addons/gptq/gptq_aiu_adapter.py:20: error: Cannot find implementation or library stub for module named "fms.utils" [import-not-found]
fms_mo/aiu_addons/i8i8/i8i8_aiu_linear.py:21: error: Cannot find implementation or library stub for module named "fms.modules.linear" [import-not-found]
fms_mo/aiu_addons/i8i8/i8i8_aiu_linear.py:28: error: Cannot find implementation or library stub for module named "fms.modules.tp" [import-not-found]
fms_mo/aiu_addons/i8i8/i8i8_aiu_linear.py:29: error: Cannot find implementation or library stub for module named "fms.utils.config" [import-not-found]
fms_mo/aiu_addons/gptq/gptq_aiu_linear.py:21: error: Cannot find implementation or library stub for module named "fms.modules.linear" [import-not-found]
fms_mo/aiu_addons/gptq/gptq_aiu_linear.py:28: error: Cannot find implementation or library stub for module named "fms.modules.tp" [import-not-found]
fms_mo/aiu_addons/gptq/gptq_aiu_linear.py:29: error: Cannot find implementation or library stub for module named "fms.utils.gptq" [import-not-found]
fms_mo/custom_ext_kernels/utils.py:32: error: Skipping analyzing "transformers.pytorch_utils": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/custom_ext_kernels/utils.py:530: error: Cannot find implementation or library stub for module named "exllama_kernels" [import-not-found]
fms_mo/custom_ext_kernels/utils.py:531: error: Cannot find implementation or library stub for module named "exllamav2_kernels" [import-not-found]
fms_mo/custom_ext_kernels/utils.py:1113: error: Cannot find implementation or library stub for module named "auto_gptq.nn_modules.qlinear.qlinear_exllama" [import-not-found]
fms_mo/custom_ext_kernels/utils.py:1116: error: Cannot find implementation or library stub for module named "auto_gptq.nn_modules.qlinear.qlinear_exllamav2" [import-not-found]
fms_mo/modules/linear.py:1079: error: Cannot find implementation or library stub for module named "cutlass_mm" [import-not-found]
fms_mo/modules/linear.py:1405: error: Cannot find implementation or library stub for module named "auto_gptq.nn_modules.qlinear.qlinear_exllama" [import-not-found]
fms_mo/modules/linear.py:1408: error: Cannot find implementation or library stub for module named "auto_gptq.nn_modules.qlinear.qlinear_exllamav2" [import-not-found]
fms_mo/modules/linear.py:1412: error: Cannot find implementation or library stub for module named "exllama_kernels" [import-not-found]
fms_mo/modules/linear.py:1413: error: Skipping analyzing "transformers.pytorch_utils": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/fx/utils.py:214: error: Cannot find implementation or library stub for module named "auto_gptq.modeling._utils" [import-not-found]
fms_mo/fx/utils.py:524: error: Cannot find implementation or library stub for module named "pygraphviz" [import-not-found]
fms_mo/utils/torchscript_utils.py:27: error: Skipping analyzing "transformers.tokenization_utils_base": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/utils/torchscript_utils.py:563: error: Cannot find implementation or library stub for module named "pygraphviz" [import-not-found]
fms_mo/utils/torchscript_utils.py:686: error: Cannot find implementation or library stub for module named "matplotlib.image" [import-not-found]
fms_mo/utils/torchscript_utils.py:686: error: Cannot find implementation or library stub for module named "matplotlib" [import-not-found]
fms_mo/utils/torchscript_utils.py:687: error: Cannot find implementation or library stub for module named "matplotlib.pyplot" [import-not-found]
fms_mo/quant/ptq.py:36: error: Library stubs not installed for "pandas" [import-untyped]
fms_mo/quant/ptq.py:59: error: Cannot find implementation or library stub for module named "piqa.piqa" [import-not-found]
fms_mo/fx/dynamo_utils.py:1060: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/fx/dynamo_utils.py:1158: error: Skipping analyzing "torchvision.models.detection.rpn": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/fx/dynamo_utils.py:1159: error: Skipping analyzing "torchvision.ops": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/calib.py:24: error: Skipping analyzing "transformers.tokenization_utils_base": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/calib.py:470: error: Library stubs not installed for "pandas" [import-untyped]
fms_mo/dq.py:30: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/run_quant.py:39: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]
fms_mo/run_quant.py:127: error: Cannot find implementation or library stub for module named "auto_gptq" [import-not-found]
fms_mo/run_quant.py:128: error: Cannot find implementation or library stub for module named "auto_gptq.modeling._const" [import-not-found]
fms_mo/run_quant.py:129: error: Cannot find implementation or library stub for module named "auto_gptq.modeling.auto" [import-not-found]
fms_mo/run_quant.py:194: error: Cannot find implementation or library stub for module named "llmcompressor.modifiers.quantization" [import-not-found]
fms_mo/run_quant.py:195: error: Cannot find implementation or library stub for module named "llmcompressor.transformers" [import-not-found]
tests/models/test_qmodelprep.py:22: error: Skipping analyzing "torchvision": module is installed, but missing library stubs or py.typed marker [import-untyped]
tests/models/test_qmodelprep.py:23: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]
tests/models/conftest.py:25: error: Skipping analyzing "torchvision.io": module is installed, but missing library stubs or py.typed marker [import-untyped]
tests/models/conftest.py:26: error: Skipping analyzing "torchvision.models": module is installed, but missing library stubs or py.typed marker [import-untyped]
tests/models/conftest.py:27: error: Skipping analyzing "transformers": module is installed, but missing library stubs or py.typed marker [import-untyped]

Signed-off-by: Martin Hickey
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 38e1efce..57c3ded4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -129,7 +129,7 @@ known-local-folder=["fms_mo","tests"]
 [tool.mypy]
 mypy_path = [""]
 packages = ["fms_mo", "tests"]
-disable_error_code = []
+disable_error_code = ["import-not-found", "import-untyped"]
 
 # TODO: tighten MyPy checks by enabling these checks over time.
 check_untyped_defs = false
 disallow_incomplete_defs = false

From b0ea939d8fd8e1347e6f1693045d864f00875665 Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 09:39:28 -0400
Subject: [PATCH 06/13] avoid init values as None

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index afd742be..f71f0d5c 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3907,7 +3907,7 @@ def forward(self, x: torch.Tensor):
         return x_dequant
 
     def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
-        delta, zero_point = None, None
+        delta, zero_point = 1.0, 0  # init seems unnecessary, at least avoid None causing type chk err
         if channel_wise:
             x_clone = x.clone().detach()
             n_channels = x_clone.shape[0]

From 26ffe95b25e823bb3503562d7bcf9a4242e52a2f Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 09:46:04 -0400
Subject: [PATCH 07/13] Update quantizers.py

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index f71f0d5c..f6d6edfd 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3907,7 +3907,8 @@ def forward(self, x: torch.Tensor):
         return x_dequant
 
     def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
-        delta, zero_point = 1.0, 0  # init seems unnecessary, at least avoid None causing type chk err
+        delta, zero_point = 1.0, 0
+        # init seems unnecessary, but at least avoid None induced type chk err
         if channel_wise:
             x_clone = x.clone().detach()
             n_channels = x_clone.shape[0]

From f2e39944ebd320764b49b2a56eea4a7e619dd8b0 Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 09:58:07 -0400
Subject: [PATCH 08/13] Update quantizers.py

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index f6d6edfd..9c40a067 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3907,8 +3907,8 @@ def forward(self, x: torch.Tensor):
         return x_dequant
 
     def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
-        delta, zero_point = 1.0, 0
-        # init seems unnecessary, but at least avoid None induced type chk err
+        # delta, zero_point = 1.0, 0
+        # init seems unnecessary, comment out to avoid None induced type chk err
         if channel_wise:
             x_clone = x.clone().detach()
             n_channels = x_clone.shape[0]
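Patches 06–08 iterate on the same mypy behavior that also drove the Wshape fix in patch 02: an attribute or variable initialized to a bare None is typed as None, so every later reassignment is "incompatible". A minimal sketch of the failure and the two usual remedies (class and attribute names here are illustrative only):

```python
from typing import Optional

class Broken:
    def __init__(self) -> None:
        self.shape = None  # attribute type is fixed as None after __init__

    def update(self) -> None:
        self.shape = [1, 2]  # error: Incompatible types in assignment
                             # (expression has type "list[int]",
                             #  variable has type "None")  [assignment]

class Fixed:
    def __init__(self) -> None:
        self.shape: Optional[list[int]] = None  # declare the union up front,
        self.dims: list[int] = []               # or start from a concrete default

    def update(self) -> None:
        self.shape = [1, 2]  # OK
        self.dims = [1, 2]   # OK
```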
From ee20c8126ae75bead89afb16c141d5a6a69677ab Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 12:03:33 -0400
Subject: [PATCH 09/13] ignore 3 assignment errs and 1 call-overload err

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index 9c40a067..0092f6aa 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3932,25 +3932,25 @@ def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
         else:
             if "max" in self.scale_method:
                 x_min = min(x.min().item(), 0)
-                x_max = max(x.max().item(), 0)
+                x_max = max(x.max().item(), 0)  # type: ignore[assignment]
                 if "scale" in self.scale_method:
                     x_min = x_min * (self.n_bits + 2) / 8
                     x_max = x_max * (self.n_bits + 2) / 8
 
-                x_absmax = max(abs(x_min), x_max)
+                x_absmax = max(abs(x_min), x_max)  # type: ignore [call-overload]
                 if self.sym:
                     x_min, x_max = -x_absmax if x_min < 0 else 0, x_absmax
 
                 delta = float(x_max - x_min) / (self.n_levels - 1)
                 if delta < 1e-8:
                     logger.info(f"Quantization range close to zero: [{x_min}, {x_max}]")
-                    delta = 1e-8
+                    delta = 1e-8  # type: ignore[assignment]
 
                 zero_point = round(-x_min / delta)
 
             elif self.scale_method == "mse":
                 x_max = x.max()
-                x_min = x.min()
+                x_min = x.min()  # type: ignore[assignment]
                 best_score = 1e10
                 for i in range(80):
                     new_max = x_max * (1.0 - (i * 0.01))

From 5495560d4d40fd381e9ff4f7f7efb699a9020bc6 Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 13:06:33 -0400
Subject: [PATCH 10/13] try to disable mypy err for the entire file

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index 0092f6aa..1b6ab14b 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -23,6 +23,7 @@
 """
 
 # pylint: disable=too-many-return-statements
+# mypy: disable-error-code="assignment"
 
 # Standard
 from collections.abc import Mapping
@@ -3932,7 +3933,7 @@ def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
         else:
             if "max" in self.scale_method:
                 x_min = min(x.min().item(), 0)
-                x_max = max(x.max().item(), 0)  # type: ignore[assignment]
+                x_max = max(x.max().item(), 0)
                 if "scale" in self.scale_method:
                     x_min = x_min * (self.n_bits + 2) / 8
                     x_max = x_max * (self.n_bits + 2) / 8
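Patch 10's module-level pragma trades precision for brevity: one comment silences a whole error code file-wide, which is why the per-line ignores from patch 09 become redundant (patch 11 removes them). A minimal sketch of the effect, assuming the file is checked with default mypy settings:

```python
# mypy: disable-error-code="assignment"

# With the pragma above, mypy reports no [assignment] errors anywhere
# in this file:
x = 0
x = "now a str"  # normally: Incompatible types in assignment

y: list[int] = []
y = None  # normally an error too -- the cost of a file-wide opt-out
```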
From a418e297fcf0ef5499d71ba3765cde947871bf6b Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 13:50:25 -0400
Subject: [PATCH 11/13] Update quantizers.py

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index 1b6ab14b..68f0cf53 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -3945,13 +3945,13 @@ def init_quantization_scale(self, x: torch.Tensor, channel_wise: bool = False):
                 delta = float(x_max - x_min) / (self.n_levels - 1)
                 if delta < 1e-8:
                     logger.info(f"Quantization range close to zero: [{x_min}, {x_max}]")
-                    delta = 1e-8  # type: ignore[assignment]
+                    delta = 1e-8
 
                 zero_point = round(-x_min / delta)
 
             elif self.scale_method == "mse":
                 x_max = x.max()
-                x_min = x.min()  # type: ignore[assignment]
+                x_min = x.min()
                 best_score = 1e10
                 for i in range(80):
                     new_max = x_max * (1.0 - (i * 0.01))
@@ -5454,7 +5454,7 @@ def custom_fp8_quantizer(
     mantissa_bits: int = 3,
     use_subnormal: bool = False,
     scale_to_max: bool = False,
-) -> torch.Tensor:
+):
     """Convert tensor tensor to FP8 format, remanining in decimal form (no
     binary conversion) and using some clever manipulation to round each
     tensor values to the closest representable FP8 value.

From ee18e75b01986646fdb08d7475b97a5cda4fb915 Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 14:01:41 -0400
Subject: [PATCH 12/13] Update quantizers.py

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 fms_mo/quant/quantizers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fms_mo/quant/quantizers.py b/fms_mo/quant/quantizers.py
index 68f0cf53..c509ea01 100644
--- a/fms_mo/quant/quantizers.py
+++ b/fms_mo/quant/quantizers.py
@@ -4586,7 +4586,7 @@ def transformers_prepare_input(
     if isinstance(data, Mapping):
         return type(data)(
             {k: transformers_prepare_input(v, dev=dev) for k, v in data.items()}
-        )
+        )  # type: ignore[call-arg]
     if isinstance(data, (tuple, list)):
         return type(data)(transformers_prepare_input(v, dev=dev) for v in data)
     if isinstance(data, torch.Tensor):

From 4c7f0687643c32e098a92075bed8752d955ded52 Mon Sep 17 00:00:00 2001
From: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
Date: Thu, 10 Apr 2025 14:15:02 -0400
Subject: [PATCH 13/13] ignore "no-any-return"

Signed-off-by: chichun-charlie-liu <57839396+chichun-charlie-liu@users.noreply.github.com>
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 503c0100..6ad0a81f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -132,7 +132,7 @@ known-local-folder=["fms_mo","tests"]
 [tool.mypy]
 mypy_path = [""]
 packages = ["fms_mo", "tests"]
-disable_error_code = ["import-not-found", "import-untyped"]
+disable_error_code = ["import-not-found", "import-untyped", "no-any-return"]
 
 # TODO: tighten MyPy checks by enabling these checks over time.
 check_untyped_defs = false
 disallow_incomplete_defs = false
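For context on patch 12's [call-arg] ignore: after the isinstance check, mypy only knows type(data) as type[Mapping[...]], whose constructor accepts no dict argument, even though the concrete runtime classes (dict, OrderedDict, ...) do. A stripped-down sketch of the pattern with a hypothetical helper name:

```python
from collections.abc import Mapping
from typing import Any, Union

import torch

Nested = Union[Mapping[str, Any], tuple, list, torch.Tensor]

def prepare(data: Nested, dev: str = "cpu") -> Any:
    """Recursively move tensors nested inside containers onto `dev`."""
    if isinstance(data, Mapping):
        # Rebuild with the same concrete mapping class as the input;
        # statically the constructor arity is unknown, hence the ignore.
        return type(data)({k: prepare(v, dev) for k, v in data.items()})  # type: ignore[call-arg]
    if isinstance(data, (tuple, list)):
        return type(data)(prepare(v, dev) for v in data)
    return data.to(dev)
```

With a plain dict input, e.g. `prepare({"w": torch.ones(2)})`, the original container type is preserved in the output, which is the whole point of calling type(data) instead of dict.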