1 file changed (+2 −2): neural_compressor/torch/algorithms/weight_only

@@ -81,7 +81,7 @@ def rtn_quantize(
     model.to(device)

     assert isinstance(model, torch.nn.Module), "only support torch module"
-    supported_layers = ["Linear"]
+    supported_layers = (torch.nn.Linear,)
     # initialize global configuration
     double_quant_config = {
         "double_quant": kwargs.get("use_double_quant", False),
@@ -93,7 +93,7 @@ def rtn_quantize(
     if export_compressed_model:
         use_optimum_format = kwargs.get("use_optimum_format", True)
     for name, m in model.named_modules():
-        if m.__class__.__name__ not in supported_layers:
+        if not isinstance(m, supported_layers):
             continue
         if name in weight_config:  # pragma: no cover
             # initialize op configuration
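For context, a minimal sketch of why the `isinstance` check is the more robust test (the `LoRALinear` subclass below is hypothetical, used only to illustrate the behavior; it is not part of this patch): comparing `__class__.__name__` against a string list misses subclasses of `nn.Linear`, whereas `isinstance` follows the inheritance chain.

```python
import torch

# Hypothetical subclass, e.g. a wrapped or adapter-augmented linear layer.
class LoRALinear(torch.nn.Linear):
    pass

m = LoRALinear(4, 4)

# Old check: compares the class name string, so subclasses are skipped.
print(m.__class__.__name__ in ["Linear"])   # False -- layer would not be quantized
# New check: isinstance matches the type and all its subclasses.
print(isinstance(m, (torch.nn.Linear,)))    # True  -- layer is picked up
```

Using a tuple for `supported_layers` also keeps the check extensible, since `isinstance` accepts a tuple of types should more layer types be supported later.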