improve coverage
irenaby committed Oct 6, 2024
1 parent 4b84a90 commit ba6433c
Showing 6 changed files with 21 additions and 15 deletions.
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.hessian.hessian_scores_request import HessianScoresRequest, \
     HessianScoresGranularity, HessianMode
 from model_compression_toolkit.logger import Logger
-if TYPE_CHECKING:
+if TYPE_CHECKING: # pragma: no cover
     from model_compression_toolkit.core.common import BaseNode
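
For context on the marker: `# pragma: no cover` is the default exclusion pattern recognized by coverage.py. A line carrying it is omitted from coverage measurement, and when it sits on a clause opener such as an `if` or `else:`, the whole clause body is excluded, so defensive branches the test suite never exercises stop counting as misses. A minimal sketch with made-up names, assuming coverage.py's default settings:

    def halve_nonnegative(value: int) -> int:
        if value < 0:  # pragma: no cover
            # Defensive guard that tests never hit; the pragma excludes this
            # branch from the coverage report.
            raise ValueError('value must be non-negative')
        return value // 2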


@@ -251,9 +251,9 @@ def compute_trackable_per_sample_hessian(self,
         hessian_score_by_image_hash = {}

         if not isinstance(inputs_batch, list):
-            raise TypeError('Expected a list of inputs')
+            raise TypeError('Expected a list of inputs') # pragma: no cover
         if len(inputs_batch) > 1:
-            raise NotImplementedError('Per-sample hessian computation is not supported for networks with multiple inputs')
+            raise NotImplementedError('Per-sample hessian computation is not supported for networks with multiple inputs') # pragma: no cover

         # Get the framework-specific calculator Hessian-approximation scores
         fw_hessian_calculator = self.fw_impl.get_hessian_scores_calculator(graph=self.graph,
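
The two new pragmas sit on `compute_trackable_per_sample_hessian`'s input contract: the method expects a Python list holding exactly one batched tensor, since multi-input networks are not supported. A call satisfying the contract might look like this (shape and array are illustrative):

    import numpy as np

    # A single network input with the batch dimension first; a second list
    # entry would trigger the NotImplementedError guarded above.
    inputs_batch = [np.random.rand(8, 3, 16, 16).astype(np.float32)]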
@@ -271,7 +271,7 @@

     @staticmethod
     def calc_image_hash(image):
-        if len(image.shape) != 3:
+        if len(image.shape) != 3: # pragma: no cover
             raise ValueError(f'Expected 3d image (without batch) for image hash calculation, got {len(image.shape)}')
         image_bytes = image.astype(np.float32).tobytes()
         return hashlib.md5(image_bytes).hexdigest()
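
`calc_image_hash` is what makes the per-sample scores trackable: a numerically identical image always maps to the same key, so cached Hessian scores can be fetched by content rather than by batch position. A standalone sketch of the same logic:

    import hashlib

    import numpy as np


    def image_hash(image: np.ndarray) -> str:
        # Reject batched inputs, then hash the float32 byte representation so
        # that identical images yield identical keys.
        if len(image.shape) != 3:
            raise ValueError(f'Expected 3d image (without batch), got {len(image.shape)}')
        return hashlib.md5(image.astype(np.float32).tobytes()).hexdigest()


    assert image_hash(np.zeros((3, 16, 16))) == image_hash(np.zeros((3, 16, 16)))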
@@ -296,7 +296,7 @@ def fetch_hessian(self,
             OC for per-output-channel when the requested node has OC output-channels, etc.)
         """

-        if len(hessian_scores_request.target_nodes) == 0:
+        if len(hessian_scores_request.target_nodes) == 0: # pragma: no cover
             return []

         if required_size == 0:
@@ -113,7 +113,7 @@ def _generate_random_vectors_batch(self, shape, distribution: HessianEstimationDistribution
             v[v == 0] = -1
             return v

-        raise ValueError(f'Unknown distribution {distribution}')
+        raise ValueError(f'Unknown distribution {distribution}') # pragma: no cover

     def compute(self) -> List[np.ndarray]:
         """
@@ -129,7 +129,7 @@ def compute(self) -> List[np.ndarray]:
         elif self.hessian_request.granularity == HessianScoresGranularity.PER_OUTPUT_CHANNEL:
             hessian_scores = self._compute_per_channel(output, target_activation_tensors)
         else:
-            raise NotImplementedError(f'{self.hessian_request.granularity} is not supported')
+            raise NotImplementedError(f'{self.hessian_request.granularity} is not supported') # pragma: no cover

         # Convert results to list of numpy arrays
         hessian_results = [torch_tensor_to_numpy(h) for h in hessian_scores]
11 changes: 6 additions & 5 deletions model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -109,8 +109,9 @@ def get_pytorch_gptq_config(n_epochs: int,
     bias_optimizer = torch.optim.SGD([torch.Tensor([])], lr=LR_BIAS_DEFAULT, momentum=GPTQ_MOMENTUM)

     if use_hessian_sample_attention:
-        if not use_hessian_based_weights:
+        if not use_hessian_based_weights: # pragma: no cover
             raise ValueError('use_hessian_based_weights must be set to True in order to use Sample Layer Attention.')
+
         hessian_weights_config = GPTQHessianScoresConfig(
             hessians_num_samples=None,
             norm_scores=False,
@@ -129,9 +130,9 @@
         gradual_quant_config = GradualActivationQuantizationConfig() if gradual_activation_quantization else None
     elif isinstance(gradual_activation_quantization, GradualActivationQuantizationConfig):
         gradual_quant_config = gradual_activation_quantization
-    else:
+    else: # pragma: no cover
         raise TypeError(f'gradual_activation_quantization argument should be bool or '
-                        f'GradualActivationQuantizationConfig, received {type(gradual_activation_quantization)}') # pragma: no cover
+                        f'GradualActivationQuantizationConfig, received {type(gradual_activation_quantization)}')

     return GradientPTQConfig(n_epochs, optimizer, optimizer_rest=optimizer_rest, loss=loss,
                              log_function=log_function, train_bias=True, optimizer_bias=bias_optimizer,
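
Given the guard above, a plausible call into the facade looks like this (a usage sketch; only argument names that appear in this diff are used, other parameters keep their defaults):

    import model_compression_toolkit as mct

    # Sample Layer Attention requires Hessian-based weights: combining
    # use_hessian_sample_attention=True with use_hessian_based_weights=False
    # would raise the ValueError guarded above.
    gptq_config = mct.gptq.get_pytorch_gptq_config(
        n_epochs=5,
        use_hessian_based_weights=True,
        use_hessian_sample_attention=True,
        gradual_activation_quantization=True)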
@@ -205,11 +206,11 @@ def pytorch_gradient_post_training_quantization(model: Module,
     """

-    if core_config.is_mixed_precision_enabled:
+    if core_config.is_mixed_precision_enabled: # pragma: no cover
         if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfig):
             Logger.critical("Given quantization config for mixed-precision is not of type 'MixedPrecisionQuantizationConfig'. "
                             "Ensure usage of the correct API for 'pytorch_gradient_post_training_quantization' "
-                            "or provide a valid mixed-precision configuration.") # pragma: no cover
+                            "or provide a valid mixed-precision configuration.")

     tb_w = init_tensorboard_writer(DEFAULT_PYTORCH_INFO)

@@ -56,7 +56,7 @@ def __call__(self, model: nn.Module, entropy_reg: float, layer_weights: torch.Tensor
         if layer_weights is None:
             layer_weights = torch.ones((len(layers),))
         if len(layer_weights.shape) != 1 or layer_weights.shape[0] != len(layers):
-            raise ValueError(f'Expected weights to be a vector of length {len(layers)}, received {layer_weights.shape}.')
+            raise ValueError(f'Expected weights to be a vector of length {len(layers)}, received {layer_weights.shape}.') # pragma: no cover
         max_w = layer_weights.max()

         b = self.beta_scheduler(self.count_iter)
7 changes: 5 additions & 2 deletions tests/pytorch_tests/model_tests/feature_models/gptq_test.py
@@ -21,6 +21,7 @@
 import mct_quantizers
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES
+from model_compression_toolkit.core.common.hessian import HessianEstimationDistribution
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
@@ -59,7 +60,7 @@ def __init__(self, unit_test, weights_bits=8, weights_quant_method=QuantizationMethod
                  hessian_weights=True, norm_scores=True, log_norm_weights=True, scaled_log_norm=False, params_learning=True,
                  num_calibration_iter=GPTQ_HESSIAN_NUM_SAMPLES, gradual_activation_quantization=False,
                  hessian_num_samples=GPTQ_HESSIAN_NUM_SAMPLES, sample_layer_attention=False,
-                 loss=multiple_tensors_mse_loss, hessian_batch_size=1):
+                 loss=multiple_tensors_mse_loss, hessian_batch_size=1, estimator_distribution=HessianEstimationDistribution.GAUSSIAN):
         super().__init__(unit_test, input_shape=(3, 16, 16), num_calibration_iter=num_calibration_iter)
         self.seed = 0
         self.rounding_type = rounding_type
@@ -78,6 +79,7 @@ def __init__(self, unit_test, weights_bits=8, weights_quant_method=QuantizationMethod
         self.sample_layer_attention = sample_layer_attention
         self.loss = loss
         self.hessian_batch_size = hessian_batch_size
+        self.estimator_distribution = estimator_distribution

     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
@@ -154,7 +156,8 @@ def get_gptq_config(self):
                 norm_scores=self.norm_scores,
                 per_sample=self.sample_layer_attention,
                 hessians_num_samples=self.hessian_num_samples,
-                hessian_batch_size=self.hessian_batch_size),
+                hessian_batch_size=self.hessian_batch_size,
+                estimator_distribution=self.estimator_distribution),

             gptq_quantizer_params_override=self.override_params,
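
For reference, the keyword arguments exercised by `get_gptq_config` above imply a Hessian-scores configuration along these lines (a sketch assembled from names visible in this diff; the import location of `GPTQHessianScoresConfig` is an assumption):

    from model_compression_toolkit.core.common.hessian import HessianEstimationDistribution
    from model_compression_toolkit.gptq.common.gptq_config import GPTQHessianScoresConfig  # assumed module

    # Field names mirror the keyword arguments in the hunk above.
    hessian_cfg = GPTQHessianScoresConfig(
        per_sample=True,
        hessians_num_samples=None,
        norm_scores=False,
        hessian_batch_size=16,
        estimator_distribution=HessianEstimationDistribution.RADEMACHER)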
2 changes: 2 additions & 0 deletions tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -21,6 +21,7 @@
 import torch
 from torch import nn
 import model_compression_toolkit as mct
+from model_compression_toolkit.core.common.hessian import HessianEstimationDistribution
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
@@ -658,6 +659,7 @@ def test_gptq_with_gradual_activation(self):
     def test_gptq_with_sample_layer_attention(self):
         kwargs = dict(sample_layer_attention=True, loss=sample_layer_attention_loss,
                       hessian_weights=True, hessian_num_samples=None,
+                      estimator_distribution=HessianEstimationDistribution.RADEMACHER,
                       norm_scores=False, log_norm_weights=False, scaled_log_norm=False)
         GPTQAccuracyTest(self, **kwargs).run_test()
         GPTQAccuracyTest(self, hessian_batch_size=16, rounding_type=RoundingType.SoftQuantizer, **kwargs).run_test()
