We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent e8053dd · commit 143ec4b. Copy full SHA for 143ec4b
src/llmcompressor/modifiers/quantization/gptq/utils/gptq_wrapper.py
@@ -259,7 +259,12 @@ def compress(
259
self._log_metrics(tick, Losses)
260
261
if strategy == QuantizationStrategy.GROUP:
262
- if actorder == ActivationOrderingStrategy.GROUP:
+ if actorder == ActivationOrderingStrategy.WEIGHT:
263
+ # restore original permutation
264
+ invperm = torch.argsort(perm)
265
+ W = W[:, invperm]
266
+
267
+ elif actorder == ActivationOrderingStrategy.GROUP:
268
# restore original permutation
269
invperm = torch.argsort(perm)
270
W = W[:, invperm]
0 commit comments