Ruff fixes #984

Merged Jan 30, 2024 (16 commits)
19 changes: 19 additions & 0 deletions .github/workflows/lint.yml
@@ -0,0 +1,19 @@
+name: Lint
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  Lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.12"
+      - uses: pre-commit/action@v3.0.0
+        env:
+          RUFF_OUTPUT_FORMAT: github
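A note on the workflow above: RUFF_OUTPUT_FORMAT=github switches Ruff to its GitHub output format, which emits workflow-command annotations (::error file=...,line=...), so lint failures appear inline on the pull request diff rather than only in the job log.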
8 changes: 8 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,8 @@
+repos:
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.1.15
+    hooks:
+      - id: ruff
+        args:
+          - --fix
+      # - id: ruff-format # TODO: enable when the time is right
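The same hooks can be run without CI: `pre-commit install` registers them as a git hook, and `pre-commit run --all-files` applies Ruff (including the --fix autofixes configured above) to the whole tree in one pass.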
6 changes: 2 additions & 4 deletions benchmarking/switchback/make_plot_with_jsonl.py
@@ -1,9 +1,7 @@
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-
 import matplotlib.gridspec as gridspec
+import matplotlib.pyplot as plt
+import pandas as pd
 
 cmap=plt.get_cmap('cool')

20 changes: 14 additions & 6 deletions benchmarking/switchback/speed_benchmark.py
@@ -1,14 +1,22 @@
 import json
-
 import time
 
 import torch
 import torch.nn as nn
 
+from bitsandbytes.triton.int8_matmul_mixed_dequantize import (
+    int8_matmul_mixed_dequantize,
+)
+from bitsandbytes.triton.int8_matmul_rowwise_dequantize import (
+    int8_matmul_rowwise_dequantize,
+)
+from bitsandbytes.triton.quantize_columnwise_and_transpose import (
+    quantize_columnwise_and_transpose,
+)
+from bitsandbytes.triton.quantize_global import (
+    quantize_global,
+    quantize_global_transpose,
+)
 from bitsandbytes.triton.quantize_rowwise import quantize_rowwise
-from bitsandbytes.triton.quantize_columnwise_and_transpose import quantize_columnwise_and_transpose
-from bitsandbytes.triton.int8_matmul_rowwise_dequantize import int8_matmul_rowwise_dequantize
-from bitsandbytes.triton.quantize_global import quantize_global, quantize_global_transpose
-from bitsandbytes.triton.int8_matmul_mixed_dequantize import int8_matmul_mixed_dequantize
 
 # KNOW ISSUE: need to optimize "w_quantize_colwise_transpose" when embeddim is too large.

4 changes: 2 additions & 2 deletions bitsandbytes/__init__.py
@@ -3,14 +3,14 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
-from . import cuda_setup, utils, research
+from . import cuda_setup, research, utils
 from .autograd._functions import (
     MatmulLtState,
     bmm_cublas,
     matmul,
+    matmul_4bit,
     matmul_cublas,
     mm_cublas,
-    matmul_4bit
 )
 from .cextension import COMPILED_WITH_CUDA
 from .nn import modules
103 changes: 45 additions & 58 deletions bitsandbytes/__main__.py
@@ -1,11 +1,7 @@
 import os
+from os.path import isdir
 import sys
-import shlex
-import subprocess
-
 from warnings import warn
-from typing import Tuple
-from os.path import isdir
 
 import torch

@@ -20,7 +16,7 @@ def find_file_recursive(folder, filename):
             out = glob.glob(os.path.join(folder, "**", filename + ext))
             outs.extend(out)
     except Exception as e:
-        raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
+        raise RuntimeError('Error: Something when wrong when trying to find file.') from e
 
     return outs
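The `raise ... from e` form above chains the original exception as `__cause__`, so the traceback shows both errors; it also sidesteps the old line's un-interpolated '{e}' placeholder (the string was missing its f prefix). A minimal, self-contained sketch of the pattern with toy values, not the library's code:

try:
    try:
        int("not a number")  # stand-in for the failing glob lookup
    except ValueError as e:
        # Explicit chaining: the ValueError is preserved as __cause__.
        raise RuntimeError("lookup failed") from e
except RuntimeError as err:
    print(repr(err.__cause__))  # the original ValueError is still attached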

@@ -62,14 +58,11 @@ def generate_bug_report_information():
                print_header(f"{path} CUDA PATHS")
                paths = find_file_recursive(path, '*cuda*')
                print(paths)
-            except:
-                print(f'Could not read LD_LIBRARY_PATH: {path}')
+            except Exception as e:
+                print(f'Could not read LD_LIBRARY_PATH: {path} ({e})')
     print('')
 
 
-
-
-
 def print_header(
     txt: str, width: int = HEADER_WIDTH, filler: str = "+"
 ) -> None:
@@ -78,67 +71,61 @@


 def print_debug_info() -> None:
+    from . import PACKAGE_GITHUB_URL
     print(
         "\nAbove we output some debug information. Please provide this info when "
         f"creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose ...\n"
     )
 
 
-generate_bug_report_information()
-
-from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
-from .cuda_setup.env_vars import to_be_ignored
-from .cuda_setup.main import get_compute_capabilities
-
-print_header("OTHER")
-print(f"COMPILED_WITH_CUDA = {COMPILED_WITH_CUDA}")
-print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities()}")
-print_header("")
-print_header("DEBUG INFO END")
-print_header("")
-print(
-    """
-Running a quick check that:
-    + library is importable
-    + CUDA function is callable
-"""
-)
-print("\nWARNING: Please be sure to sanitize sensible info from any such env vars!\n")
+def main():
+    generate_bug_report_information()
 
-try:
-    from bitsandbytes.optim import Adam
+    from . import COMPILED_WITH_CUDA
+    from .cuda_setup.main import get_compute_capabilities
 
-    p = torch.nn.Parameter(torch.rand(10, 10).cuda())
-    a = torch.rand(10, 10).cuda()
+    print_header("OTHER")
+    print(f"COMPILED_WITH_CUDA = {COMPILED_WITH_CUDA}")
+    print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities()}")
+    print_header("")
+    print_header("DEBUG INFO END")
+    print_header("")
+    print("Checking that the library is importable and CUDA is callable...")
+    print("\nWARNING: Please be sure to sanitize sensitive info from any such env vars!\n")
 
-    p1 = p.data.sum().item()
+    try:
+        from bitsandbytes.optim import Adam
 
-    adam = Adam([p])
+        p = torch.nn.Parameter(torch.rand(10, 10).cuda())
+        a = torch.rand(10, 10).cuda()
 
-    out = a * p
-    loss = out.sum()
-    loss.backward()
-    adam.step()
+        p1 = p.data.sum().item()
 
-    p2 = p.data.sum().item()
+        adam = Adam([p])
 
-    assert p1 != p2
-    print("SUCCESS!")
-    print("Installation was successful!")
-    sys.exit(0)
+        out = a * p
+        loss = out.sum()
+        loss.backward()
+        adam.step()
 
-except ImportError:
-    print()
-    warn(
-        f"WARNING: {__package__} is currently running as CPU-only!\n"
-        "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
-        f"If you think that this is so erroneously,\nplease report an issue!"
-    )
-    print_debug_info()
-    sys.exit(0)
-except Exception as e:
-    print(e)
-    print_debug_info()
-    sys.exit(1)
+        p2 = p.data.sum().item()
+
+        assert p1 != p2
+        print("SUCCESS!")
+        print("Installation was successful!")
+    except ImportError:
+        print()
+        warn(
+            f"WARNING: {__package__} is currently running as CPU-only!\n"
+            "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
+            f"If you think that this is so erroneously,\nplease report an issue!"
+        )
+        print_debug_info()
+    except Exception as e:
+        print(e)
+        print_debug_info()
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
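The restructuring above is the standard entry-point pattern: the module-level script body moves into main() behind an `if __name__ == "__main__":` guard, so `python -m bitsandbytes` still runs the diagnostics while merely importing the module no longer triggers GPU checks or calls sys.exit() as a side effect. A minimal sketch with a hypothetical module, not the library's code:

import sys


def main() -> int:
    # All side effects live here and run only on explicit invocation.
    print("running diagnostics")
    return 0  # single exit code instead of scattered sys.exit() calls


if __name__ == "__main__":
    sys.exit(main())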
2 changes: 1 addition & 1 deletion bitsandbytes/autograd/__init__.py
@@ -1 +1 @@
-from ._functions import undo_layout, get_inverse_transform_indices
+from ._functions import get_inverse_transform_indices, undo_layout
6 changes: 3 additions & 3 deletions bitsandbytes/autograd/_functions.py
@@ -1,8 +1,8 @@
+import operator
+import warnings
 from dataclasses import dataclass
 from functools import reduce  # Required in Python 3
-from typing import Tuple, Optional, Callable
-import operator
-import warnings
+from typing import Callable, Optional, Tuple
 from warnings import warn
 
 import torch
9 changes: 3 additions & 6 deletions bitsandbytes/cextension.py
@@ -1,12 +1,9 @@
 import ctypes as ct
 import os
-import torch
-
 from pathlib import Path
 from warnings import warn
 
-from bitsandbytes.cuda_setup.main import CUDASetup
+import torch
 
+from bitsandbytes.cuda_setup.main import CUDASetup
 
 setup = CUDASetup.get_instance()
if setup.initialized != True:
Expand All @@ -25,7 +22,7 @@
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
_ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
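The `_ = lib.cadam32bit_grad_fp32` line is a symbol probe: looking up a function on a ctypes library object raises AttributeError when the shared library lacks that symbol, and binding the result to `_` signals that the bare lookup is intentional to linters that flag expression statements with no effect. A standalone sketch of the same idea, using libc as a stand-in (assumes a POSIX system where find_library can locate libc):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))  # assumption: libc is findable
try:
    _ = libc.printf  # lookup succeeds only if the symbol is exported
    has_symbol = True
except AttributeError:
    has_symbol = False
print(f"printf available: {has_symbol}")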
42 changes: 24 additions & 18 deletions bitsandbytes/cuda_setup/main.py
@@ -17,15 +17,15 @@
"""

import ctypes as ct
import os
import errno
import os
from pathlib import Path
import platform
import torch
from typing import Set, Union
from warnings import warn
from itertools import product

from pathlib import Path
from typing import Set, Union
import torch

from .env_vars import get_potentially_lib_path_containing_env_vars

# these are the most common libs names
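One detail worth noting in the result above: plain `import` and `from ... import` lines end up interleaved alphabetically within the standard-library block (errno, os, pathlib, platform, typing, warnings). That matches isort's force-sort-within-sections behavior, so the project's Ruff configuration presumably enables it; default isort ordering would keep the `from` imports grouped after the plain ones.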
@@ -111,14 +111,16 @@ def manual_override(self):
         if torch.cuda.is_available():
             if 'BNB_CUDA_VERSION' in os.environ:
                 if len(os.environ['BNB_CUDA_VERSION']) > 0:
-                    warn((f'\n\n{"="*80}\n'
-                          'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
-                          'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
-                          'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
-                          'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
-                          'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
-                          f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
-                          f'\n{"="*80}\n\n'))
+                    warn(
+                        f'\n\n{"=" * 80}\n'
+                        'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+                        'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+                        'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+                        'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+                        'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+                        f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+                        f'\n{"=" * 80}\n\n'
+                    )
                     binary_name = self.binary_name.rsplit(".", 1)[0]
                     suffix = ".so" if os.name != "nt" else ".dll"
                     self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
@@ -207,7 +209,7 @@ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
         try:
             if path.exists():
                 existent_directories.add(path)
-        except PermissionError as pex:
+        except PermissionError:
             # Handle the PermissionError first as it is a subtype of OSError
             # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
             pass
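The comment retained above is about handler order: PermissionError is a subclass of OSError, so when the two need different treatment the subclass clause must come first or it is never reached. A small self-contained illustration:

try:
    raise PermissionError("no read access")  # toy error, not the library's path check
except PermissionError:
    print("skipped unreadable path")  # reached: the subclass matches first
except OSError:
    print("other OS-level failure")  # would shadow PermissionError if listed first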
@@ -217,8 +219,10 @@ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:

     non_existent_directories: Set[Path] = candidate_paths - existent_directories
     if non_existent_directories:
-        CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
-            f"be non-existent: {non_existent_directories}", is_warning=False)
+        CUDASetup.get_instance().add_log_entry(
+            f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+            is_warning=False,
+        )
 
     return existent_directories

@@ -360,8 +364,10 @@ def evaluate_cuda_setup():
     cuda_version_string = get_cuda_version()
 
     cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
-    cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
-        "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
+    cuda_setup.add_log_entry(
+        "CUDA SETUP: To manually override the PyTorch CUDA version please see:"
+        "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md"
+    )
 
 
     # 7.5 is the minimum CC vor cublaslt