From fc4a135ed1604d1f6190af725bea912e19e8a88a Mon Sep 17 00:00:00 2001
From: justheuristic
Date: Sat, 17 Sep 2022 23:24:26 +0300
Subject: [PATCH] clearer assertions

---
 bitsandbytes/autograd/_functions.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index 6d473e940..f4a6d5736 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -232,8 +232,8 @@ def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()):
         # Cast A to fp16
         A_dtype = A.dtype
         if A_dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
-            A = A.to(torch.float16)
+            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
+            A = A.to(torch.float16)
 
         # 1. Quantize A
         if len(A.shape) == 3:
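
For reference, a minimal standalone sketch of the behavior the changed hunk implements (the helper name cast_to_fp16_with_warning is hypothetical and only for illustration, not a bitsandbytes API):

    import warnings
    import torch

    def cast_to_fp16_with_warning(A: torch.Tensor) -> torch.Tensor:
        # Mirrors the patched lines: warn about the dtype conversion, then
        # cast the input matrix to float16 before 8-bit quantization.
        if A.dtype != torch.float16:
            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A.dtype} to float16")
            A = A.to(torch.float16)
        return A

    # A bfloat16 activation triggers the warning and comes back as float16.
    x = torch.randn(4, 8, dtype=torch.bfloat16)
    assert cast_to_fp16_with_warning(x).dtype == torch.float16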