clearer assertions
justheuristic committed Sep 17, 2022
1 parent e29c5f5 · commit fc4a135
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions bitsandbytes/autograd/_functions.py
@@ -232,8 +232,8 @@ def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()):
         # Cast A to fp16
         A_dtype = A.dtype
         if A_dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
-            A = A.to(torch.float16)
+            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
+            A = A.to(torch.float16)
 
         # 1. Quantize A
         if len(A.shape) == 3:
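
For context, the changed hunk implements a warn-then-cast guard: if the input to the 8-bit matmul is not already fp16, it emits a warning and converts the matrix before quantization. Below is a minimal standalone sketch of the same pattern; the helper name cast_input_to_fp16 is illustrative and not part of the bitsandbytes API.

import warnings

import torch


def cast_input_to_fp16(A: torch.Tensor) -> torch.Tensor:
    # Same guard as in the diff above: warn when the input matrix is not
    # fp16, then cast it so the 8-bit path sees a float16 tensor.
    A_dtype = A.dtype
    if A_dtype != torch.float16:
        warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
        A = A.to(torch.float16)
    return A


# A float32 input triggers the warning; an fp16 input passes through unchanged.
x = torch.randn(4, 8, dtype=torch.float32)
assert cast_input_to_fp16(x).dtype == torch.float16

Warning rather than silently casting is a reasonable design choice here: the cast costs an extra copy, and the message lets users spot a dtype mismatch in their own code instead of hiding it.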
