Verify bias is not None in GPTQ bias training

Ofir Gordon authored and committed on Jun 17, 2024
1 parent c77aee7 · commit 37e8a1c
Showing 4 changed files with 9 additions and 5 deletions.
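
The change is the same in all four files: before a layer's bias is collected for training or written back into the graph, the GPTQ code now verifies that the bias attribute actually holds a tensor rather than None, in both the Keras and PyTorch backends.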
model_compression_toolkit/gptq/keras/gptq_training.py (1 addition, 1 deletion)
@@ -353,7 +353,7 @@ def update_graph(self):
                     node.final_activation_quantization_cfg.set_quant_config_attr(config_attr, config_value)
                 if self.gptq_config.train_bias:
                     use_bias = layer.layer.get_config().get(USE_BIAS)
-                    if use_bias is not None and use_bias:
+                    if use_bias is not None and use_bias and layer.layer.bias is not None:
                         new_bias = layer.layer.bias.numpy()
                         node.set_weights_by_keys(BIAS, new_bias)

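For context, a minimal standalone sketch of the same guard (assumptions not taken from the commit: USE_BIAS and BIAS stand for the strings "use_bias" and "bias", matching how the diff reads them from get_config(), and read_bias is a hypothetical helper, not MCT API):

    import tensorflow as tf

    def read_bias(layer: tf.keras.layers.Layer):
        # Mirror the guarded read in update_graph: the config must report a
        # bias, and the bias variable itself must actually have been created.
        use_bias = layer.get_config().get("use_bias")
        if use_bias is not None and use_bias and layer.bias is not None:
            return layer.bias.numpy()
        return None

    dense = tf.keras.layers.Dense(4, use_bias=True)
    dense.build((None, 8))           # the bias variable is created during build
    print(read_bias(dense).shape)    # (4,)
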
model_compression_toolkit/gptq/keras/graph_info.py (1 addition, 1 deletion)
@@ -63,7 +63,7 @@ def get_gptq_trainable_parameters(fxp_model: Model,
             kernel_ops_attrs = fw_info.kernel_ops_attributes_mapping.get(type(layer.layer))
             use_bias = kernel_ops_attrs is not None and kernel_ops_attrs[0] is not None \
                 and layer.layer.get_config().get(USE_BIAS)
-            if use_bias is not None and use_bias:
+            if use_bias is not None and use_bias and layer.layer.bias is not None:
                 bias_weights.append([layer.layer.bias])

     return trainable_weights, bias_weights, trainable_threshold
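
Both Keras call sites receive the same guard: graph_info.py keeps a bias-less layer out of the trainable-parameter collection, and gptq_training.py skips the write-back for it, so the two sides of the training loop stay consistent.
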
model_compression_toolkit/gptq/pytorch/gptq_training.py (5 additions, 2 deletions)
@@ -299,7 +299,9 @@ def update_graph(self) -> Graph:
                 for config_attr, config_value in activation_quant_config.items():
                     node.final_activation_quantization_cfg.set_quant_config_attr(config_attr, config_value)
             if self.gptq_config.train_bias and hasattr(layer.layer, BIAS):
-                node.set_weights_by_keys(BIAS, self.fw_impl.to_numpy(getattr(layer.layer, BIAS)))
+                bias = getattr(layer.layer, BIAS)
+                if bias is not None:
+                    node.set_weights_by_keys(BIAS, self.fw_impl.to_numpy(bias))

         return graph_quant

@@ -316,4 +318,5 @@ def _set_requires_grad(self):
         if isinstance(layer, PytorchQuantizationWrapper):
             if hasattr(layer.layer, BIAS):
                 bias = getattr(layer.layer, BIAS)
-                bias.requires_grad = self.gptq_config.train_bias
+                if bias is not None:
+                    bias.requires_grad = self.gptq_config.train_bias
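
One concrete case the PyTorch checks guard against (the commit does not spell out the trigger, so this is an inferred example): a module constructed with bias=False still exposes a bias attribute, registered as None, so hasattr alone is not a sufficient test.

    import torch.nn as nn

    m = nn.Linear(4, 4, bias=False)
    print(hasattr(m, "bias"))   # True: the parameter slot is registered even with bias=False
    print(m.bias is None)       # True: the parameter itself is None

    # Without the new check, _set_requires_grad would reach the equivalent of:
    #   m.bias.requires_grad = True
    # AttributeError: 'NoneType' object has no attribute 'requires_grad'
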
model_compression_toolkit/gptq/pytorch/graph_info.py (2 additions, 1 deletion)
@@ -56,7 +56,8 @@ def get_gptq_trainable_parameters(fxp_model: nn.Module,

         if add_bias and hasattr(layer.layer, BIAS):
             bias = getattr(layer.layer, BIAS)
-            trainable_bias.append(bias)
+            if bias is not None:
+                trainable_bias.append(bias)

     return trainable_aux_weights, trainable_bias, trainable_threshold
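
Previously a None bias from such a module would have been appended to trainable_bias and, presumably, handed to the optimizer downstream; the guard keeps the returned lists limited to real parameters.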

