From e3b661a21ead583bfbf5194134bb31e6c8a6d8ce Mon Sep 17 00:00:00 2001
From: My Nguyen <46087525+chiffonng@users.noreply.github.com>
Date: Wed, 12 Feb 2025 09:15:35 +0100
Subject: [PATCH] Fix imports and type errors (#12)

* Remove old code for mnemonic classification
* Enhance type hints and error handling
* Update imports for consistency
---
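Note (this sits after the "---" marker, so `git am` ignores it): with the import
changes below, modules resolve absolutely from the `src` package, so scripts
should typically be run from the repository root. A minimal usage sketch of the
revised loader, assuming only the `load_hf_dataset()` signature introduced in
this patch; the csv path is illustrative, not a repository constant:

    from src.data.data_loaders import load_hf_dataset

    # Pull the default dataset from the Hugging Face hub and save a csv copy.
    # After this patch, a falsy file_path raises ValueError up front rather
    # than failing later inside dataset.to_csv().
    dataset = load_hf_dataset(to_csv=True, file_path="data/mnemonics.csv")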
 src/app/app.py                  |  41 ++--
 src/data/data_loaders.py        |  17 +-
 src/data/data_processing.py     |  20 +-
 src/data/mnemonic_processing.py | 360 --------------------------------
 src/utils/aliases.py            |   2 +-
 src/utils/error_handling.py     |  11 +-
 uv.lock                         |  44 ++--
 7 files changed, 82 insertions(+), 413 deletions(-)
 delete mode 100644 src/data/mnemonic_processing.py

diff --git a/src/app/app.py b/src/app/app.py
index df6975e..5d9bbec 100644
--- a/src/app/app.py
+++ b/src/app/app.py
@@ -11,10 +11,11 @@
 import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from unsloth import FastLanguageModel
 
 DESCRIPTION = """
 This is a demo for the Google Gemma 2 9B IT model. Use it to generate mnemonics for English words you want to learn and remember.
-Input your instructions or start with one of the examples provided. The input supports a subset of markdown formatting such as bold, italics, code, tables. You can also use the following special tokens to customize the mnemonic:
+Input your instructions or start with one of the examples provided. The input supports a subset of markdown formatting, such as bold, italics, code, and tables.
 """
 
 MAX_MAX_NEW_TOKENS = 2048
@@ -23,18 +24,32 @@
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "google/gemma-2-9b-it"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map="auto",
-    torch_dtype=torch.bfloat16,
-)
-model.config.sliding_window = 4096
-model.eval()
-
+# model_id = "google/gemma-2-9b-it"
+model_id = "unsloth/gemma-2-9b-it"
 
-@spaces.GPU(duration=90)
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name=model_id,
+    max_seq_length=MAX_INPUT_TOKEN_LENGTH,
+    dtype=None,
+    load_in_4bit=True,
+    device=device,
+    cache_dir="models",
+)
+FastLanguageModel.for_inference(model)
+# tokenizer = AutoTokenizer.from_pretrained(model_id)
+# model = AutoModelForCausalLM.from_pretrained(
+#     model_id,
+#     device_map="auto",
+#     load_in_4bit=True,
+#     torch_dtype=torch.bfloat16,
+#     cache_dir="models",
+# )
+# model.config.sliding_window = 4096
+# model.eval()
+
+
+# Uncomment to use Hugging Face Spaces GPU
+# @spaces.GPU(duration=90)
 def generate(
     message: str,
     chat_history: list[dict],
@@ -159,4 +174,4 @@ def generate(
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch(share=True)
diff --git a/src/data/data_loaders.py b/src/data/data_loaders.py
index a2bd99f..dfaa62f 100644
--- a/src/data/data_loaders.py
+++ b/src/data/data_loaders.py
@@ -10,10 +10,10 @@
 
 from datasets import Dataset
 
-import utils.constants as c
-from utils.aliases import ExtensionsType, PathLike
-from utils.common import login_hf_hub
-from utils.error_handling import check_dir_path, check_file_path
+from src.utils import constants as c
+from src.utils.aliases import ExtensionsType, PathLike
+from src.utils.common import login_hf_hub
+from src.utils.error_handling import check_dir_path, check_file_path
 
 # Set up logging to console
 logger = logging.getLogger(__name__)
@@ -63,7 +63,7 @@ def load_local_dataset(file_path: PathLike, **kwargs) -> "Dataset":
 def load_hf_dataset(
     repo_id: Optional[str] = None,
     to_csv: bool = False,
-    file_path: PathLike = None,
+    file_path: "Optional[PathLike]" = None,
     **kwargs,
 ) -> "DatasetDict":
     """Load a dataset from the Hugging Face hub.
@@ -87,6 +87,10 @@ def load_hf_dataset(
 
     if to_csv:
         file_path = check_file_path(file_path, new_ok=True, extensions=c.CSV_EXT)
+        if not file_path:
+            raise ValueError(
+                "Invalid file path. Must be a valid CSV path to save the dataset to."
+            )
         dataset.to_csv(file_path)
         logger.info(f"Saved dataset to {file_path}.")
     else:
@@ -106,4 +110,7 @@
 if __name__ == "__main__":
     # Load a dataset from the Hugging Face hub
     mnemonic_dataset: "Dataset" = load_hf_dataset()
+    test_dataset: "Dataset" = load_hf_dataset(
+        repo_id="nbalepur/Mnemonic_Test", split="train"
+    )
     logger.info(f"\n\n{mnemonic_dataset}")
""" - input_dir = check_dir_path(input_dir) + checked_input_dir = check_dir_path(input_dir) # Load and combine the datasets - combined_df = load_clean_txt_csv_data(input_dir) + combined_df = load_clean_txt_csv_data(checked_input_dir) # Clean the data combined_df.drop_duplicates(subset=[c.TERM_COL], inplace=True, keep="first") @@ -176,7 +180,7 @@ def train_test_split(dataset: "Dataset", test_size: float = 0.2) -> "DatasetDict def push_to_hf_hub( dataset: "Dataset", - repo_id: str = c.HF_DATASET_REPO, + repo_id: str = c.HF_DATASET_NAME, private: bool = False, **kwargs, ): diff --git a/src/data/mnemonic_processing.py b/src/data/mnemonic_processing.py deleted file mode 100644 index 46253cb..0000000 --- a/src/data/mnemonic_processing.py +++ /dev/null @@ -1,360 +0,0 @@ -"""Module for processing mnemonics, including code to classify, standardize or diversify them using OpenAI.""" - -import logging -from pathlib import Path -from typing import TYPE_CHECKING, no_type_check -from warnings import warn - -import pandas as pd -from dotenv import load_dotenv -from openai import LengthFinishReasonError, OpenAI, OpenAIError, RateLimitError -from pydantic import BaseModel, ValidationError -from pydantic.functional_validators import AfterValidator -from tenacity import ( - after_log, - before_log, - retry, - retry_if_exception_type, - stop_after_attempt, - wait_random_exponential, -) -from tqdm import tqdm -from typing_extensions import Annotated -from yaml import safe_load - -if TYPE_CHECKING: - from openai import Response - -from utils.aliases import PathLike -from utils.constants import ( - CLASSIFIED_DATASET_CSV, - CLASSIFIED_DATASET_PARQUET, - COMBINED_DATASET_CSV, - COMBINED_DATASET_PARQUET, - CSV_EXT, - PARQUET_EXT, -) -from utils.error_handling import check_file_path, which_file_exists - -load_dotenv() # Load environment variables - -# Set up logging to file -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) -logger.addHandler(logging.FileHandler("logs/mnemonic_processing.log")) -formatter = logging.Formatter( - "%(asctime)s - %(levelname)s - %(funcName)s - %(message)s" -) -logger.handlers[0].setFormatter(formatter) - -# Initialize OpenAI client -client = OpenAI() - -# Load config and prompts -with Path("config/classify_mnemonics.yaml").open("r") as f: - classification_conf = safe_load(f) # dict of config - batch_size = classification_conf["batch_size"] - - -def validate_classification(value: int) -> int: - """Validate classification value to be -1, 0, 1, or 2. Otherwise, return -1.""" - return value if value in {-1, 0, 1, 2} else -1 - - -ValidClassification = Annotated[int, AfterValidator(validate_classification)] - - -# Mnemonic classification schema -class ClassificationSchema(BaseModel): - """Pydantic schema for the classification of mnemonics.""" - - classifications: list[ValidClassification] - - -def combine_key_value(path: PathLike) -> list[str]: - """Load 2-column data from a file, to format: key: value. - - Args: - path (PathLike): The path to the file containing the 2-column data. - - Returns: - combined_col (list[str]): The combined key and value columns. - """ - path_obj: Path = check_file_path(path, extensions=[PARQUET_EXT, CSV_EXT]) - - if path_obj.suffix == PARQUET_EXT: - df = pd.read_parquet(path, engine="pyarrow") - elif path_obj.suffix == CSV_EXT: - df = pd.read_csv(path, header="infer", quotechar='"') - - logger.info(f"Read {df.shape[0]} rows from {str(path)}.") - - if df.shape[1] > 2: - warn( - "More than 2 columns detected. 
Only the first 2 columns will be used.", - category=UserWarning, - stacklevel=2, - ) - logger.warning( - "More than 2 columns detected. Only the first 2 columns will be used for processing." - ) - elif df.shape[1] < 2: - raise ValueError("File must have at least 2 columns.") - - combined_col = df.iloc[:, 0] + ": " + df.iloc[:, 1] - - return combined_col.to_list() - - -def create_batches(data: list[str], batch_size=batch_size) -> list[str]: - """Build batches of text data to send to OpenAI's API. - - Args: - data (list[str]): The list of data to process. - batch_size (int, optional): The number of mnemonics to include in each batch. Defaults to batch_size read from the config. - - Returns: - flattened_batches (list[str]): The list of batches, each item is a batch of text data - - Raises: - ValueError: if no data is provided or if the batch size is invalid. - """ - if not data: - raise ValueError("No data to process.") - if batch_size < 1 or batch_size > len(data): - warning = f"Batch size must be between 1 and the number of mnemonics ({len(data)}). Adjusting batch size to {len(data)}." - warn(warning, category=UserWarning, stacklevel=2) - logger.warning(warning) - batch_size = min(batch_size, len(data)) - - logger.info(f"Creating batches of {batch_size} mnemonics.") - batches = [data[i : i + batch_size] for i in range(0, len(data), batch_size)] - flattened_batches = ["\n".join(batch) for batch in batches] - logger.info(f"Created {len(batches)} batches of mnemonics.") - - return flattened_batches - - -@retry( - retry=retry_if_exception_type(RateLimitError), - stop=stop_after_attempt(3), - wait=wait_random_exponential(multiplier=1, min=0, max=4), # 2^0 to 2^4 seconds - before=before_log(logger, logging.WARNING), - after=after_log(logger, logging.WARNING), -) -def classify_mnemonics_api(batches: str | list[str]): - """Classify mnemonics using OpenAI's API, GPT-4o mini and return the responses as JSON array of numbers. Retry up to 3 times if rate limited. - - Args: - batches (list[str]): The list of batches of mnemonics to categorize. - - Returns: - classification_by_batch (list[ValidClassification]): The list of parsed categories. - - Raises: - ValueError: - - If the output file is not in parquet or csv - - If the input (batches) is not a list of strings. - """ - if not isinstance(batches, (list, str)): - raise ValueError( - f"Batches must be a string or a list of strings. Current type: {type(batches)}" - ) - batches = [batches] if isinstance(batches, str) else batches - - logger.info(f"Processing {len(batches)} batches...") - logger.info( - f"Configurations: batch_size={batch_size}, model={classification_conf['model']}, temperature={classification_conf['temperature']}, num_outputs={classification_conf['num_outputs']}." - ) - - classification_by_batch = [] - for i, batch in tqdm(enumerate(batches), desc="Processing batches", unit="batch"): - classification_msg = get_structured_response( - i, - batch, - model_config=classification_conf, - response_format=ClassificationSchema, - ) - classification_batch_i = parse_structured_response(classification_msg, batch, i) - classification_by_batch.extend(classification_batch_i) - - logger.info(f"Returned {len(classification_by_batch)} classifications.") - return classification_by_batch - - -def get_structured_response( - i: int, - batch: str, - model_config: dict, - response_format: BaseModel = ClassificationSchema, -) -> dict: # mypy: ignore - """Get response from OpenAI API. 
Documentation: https://platform.openai.com/docs/guides/structured-outputs/how-to-use. - - Args: - i (int): The index of the batch. - batch (str): The batch of mnemonics to classify. - model_config (dict): The model configuration. - response_format (BaseModel, optional): The response format. Defaults to ClassificationSchema. - - Returns: - structure_msg (dict = openai.Response...message): The structured message object. - """ - try: - structure_msg = ( - client.beta.chat.completions.parse( - model=model_config["model"], - messages=[ - {"role": "system", "content": model_config["prompts"]["system"]}, - { - "role": "user", - "content": model_config["prompts"]["user"] + batch, - }, - ], - max_tokens=batch_size * 3 + 1, # 3 tokens per mnemonic - temperature=model_config["temperature"], - n=model_config["num_outputs"], - response_format=response_format, - ) - .choices[0] - .message - ) - if structure_msg.refusal: - logger.error(f"Batch {i+1}: OpenAI refused to process the request.") - raise OpenAIError("OpenAI refused to process the request.") - - return structure_msg - - except Exception as e: - if isinstance(e, LengthFinishReasonError): - logger.error(f"LengthFinishReasonError: {e}") - raise ValueError( - "OpenAI run out of tokens. Please try: reducing the batch_size, or increasing the max_tokens parameter." - ) from e - else: - logger.error(f"Exception: {e}") - raise e - - -@no_type_check -def parse_structured_response( - structure_msg: object, - batch: str, - batch_index: int, -) -> list[int]: - """Parse the structured message from OpenAI's API. - - Args: - structure_msg (openai.Response...message): The structured message object. - batch (str): The batch of mnemonics. - batch_index (int): The index of the batch. - - Returns: - (list[int]): The list of parsed categories. - """ - try: - if structure_msg.parsed: - classification_batch_i = structure_msg.parsed.classifications - batch_i_size = len(batch.split("\n")) - classification_i_size = len(classification_batch_i) - - # Log batch debug info - logger.debug( - f"Batch {batch_index+1} with {batch_i_size} mnemonics: {classification_i_size} classifications." - ) - logger.debug( - f"Batch {batch_index+1} classifications: {classification_batch_i}" - ) - logger.debug( - f"Batch {batch_index+1} types: {type(classification_batch_i[0])}" - ) - - # Handle when the number of classifications does not match the number of mnemonics - if classification_i_size > batch_i_size: - logger.warning( - f"Batch {batch_index+1}: Number of classifications {classification_i_size} exceeds the number of mnemonics {batch_i_size}. Truncating to match the number of mnemonics..." - ) - return classification_batch_i[:batch_i_size] - - elif classification_i_size < batch_i_size: - logger.warning( - f"Batch {batch_index+1}: Number of classifications {classification_i_size} is less than the number of mnemonics {batch_i_size}. Padding with -1..." - ) - return classification_batch_i + [-1] * ( - batch_i_size - classification_i_size - ) - - else: # classification_i_size == batch_i_size - return classification_batch_i - - except ValidationError as e: - logger.error(f"ValidationError: {e}") - raise ValueError( - f"Batch {batch_index+1}: The response didn't match the expected format. Check the logs for more details." - ) from e - - -def save_structured_outputs( - outputs: list[ValidClassification], input_path: PathLike, output_path: PathLike -): - """Save the classification results to an existing file of mnemonics. 
- - Args: - outputs (list[ValidClassification]): The list of parsed categories. - input_path (PathLike): The path to the file containing the mnemonics. - output_path (PathLike): The path to .csv or .parquet file to write the parsed. - - Raises: - ValueError: If the output file is not in parquet or csv format. - """ - # Set up output path - output_path = Path(output_path) - output_path.parent.mkdir(parents=True, exist_ok=True) - - # Read initial dataset to get the number of rows - input_path = check_file_path( - input_path, new_ok=True, extensions=[PARQUET_EXT, CSV_EXT] - ) - df = ( - pd.read_csv(input_path) - if input_path.suffix == CSV_EXT - else pd.read_parquet(input_path) - ) - if len(df) != len(outputs): - error_msg = f"Number of rows in the file does not match the number of categories. Number of rows: {len(df)}, number of categories: {len(outputs)}" - logger.error(error_msg) - raise ValueError(error_msg) - - # Add the categories column and save to the requested format - df["category"] = outputs - save_func = df.to_parquet if output_path.suffix == PARQUET_EXT else df.to_csv - save_func(output_path, index=False) - logger.info(f"Saved classification results to {str(output_path)}.") - - -def standardize_mnemonics_api(batches): - """Standardize mnemonics using OpenAI's API, GPT-4o mini.""" - raise NotImplementedError - - -def diversify_mnemonics_api(batches): - """Diversify mnemonics using OpenAI's API, GPT-4o mini.""" - raise NotImplementedError - - -def classify_mnemonics(input_path: str, output_path: str): - """End-to-end function for classifying mnemonics. - - Args: - input_path (str): The path to the file containing the mnemonics. - output_path (str): The path to the file to save the classification results. - - Raises: - ValueError: If the output file is not in parquet or csv format. - """ - data = combine_key_value(input_path) - batches = create_batches(data) - classifications = classify_mnemonics_api(batches) - save_structured_outputs(classifications, input_path, output_path) - - -classify_mnemonics(COMBINED_DATASET_CSV, CLASSIFIED_DATASET_CSV) diff --git a/src/utils/aliases.py b/src/utils/aliases.py index d45d814..ddff19b 100644 --- a/src/utils/aliases.py +++ b/src/utils/aliases.py @@ -4,5 +4,5 @@ from typing import TypeAlias # Type aliases -PathLike: TypeAlias = str | Path +PathLike: TypeAlias = str | Path | None | list[str] | list[Path] ExtensionsType: TypeAlias = list[str] | str | None diff --git a/src/utils/error_handling.py b/src/utils/error_handling.py index 458366d..f7908ba 100644 --- a/src/utils/error_handling.py +++ b/src/utils/error_handling.py @@ -5,7 +5,7 @@ from typing import Optional, TypeAlias from warnings import warn -from utils.aliases import ExtensionsType, PathLike +from src.utils.aliases import ExtensionsType, PathLike def validate_path(path: PathLike) -> Path: @@ -39,6 +39,9 @@ def validate_and_normalize_extensions(extensions: ExtensionsType) -> list[str]: Raises: TypeError: If 'extensions' is not a string or a list of strings. """ + if extensions is None: + return [] + if isinstance(extensions, str): extensions = [extensions] elif not all(isinstance(ext, str) for ext in extensions): @@ -129,19 +132,19 @@ def check_dir_path( def which_file_exists( - *files: list[PathLike], extensions: Optional[ExtensionsType] = None + *files: PathLike, extensions: Optional[ExtensionsType] = None ) -> Path: """Return the first file found in the list of files. Optionally, return the first file with the specified extensions. 
     Args:
-        files (list[PathLike]): The list of files to check.
+        files (PathLike): The list of file paths to check.
         extensions (list[str], optional): A list of allowed file extensions. Defaults to [].
 
     Returns:
         file_path (Path): The first file found in the list.
     """
     for file in files:
-        file_path: Path = check_file_path(file, new_ok=True, extensions=extensions)
+        file_path = check_file_path(file, new_ok=True, extensions=extensions)
         if file_path.exists():
             return file_path
diff --git a/uv.lock b/uv.lock
index ea03586..c4ac07d 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1549,7 +1549,7 @@ requires-dist = [
     { name = "ruff", specifier = ">=0.7.1" },
     { name = "spaces", specifier = ">=0.31.0" },
     { name = "tenacity", specifier = ">=9.0.0" },
-    { name = "torch", specifier = ">=2.5.1" },
+    { name = "torch", specifier = ">=2.4.0" },
     { name = "tqdm", specifier = ">=4.67.1" },
     { name = "transformers" },
     { name = "trl" },
@@ -2754,27 +2754,27 @@ wheels = [
 
 [[package]]
 name = "ruff"
-version = "0.8.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5e/2b/01245f4f3a727d60bebeacd7ee6d22586c7f62380a2597ddb22c2f45d018/ruff-0.8.2.tar.gz", hash = "sha256:b84f4f414dda8ac7f75075c1fa0b905ac0ff25361f42e6d5da681a465e0f78e5", size = 3349020 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/91/29/366be70216dba1731a00a41f2f030822b0c96c7c4f3b2c0cdce15cbace74/ruff-0.8.2-py3-none-linux_armv6l.whl", hash = "sha256:c49ab4da37e7c457105aadfd2725e24305ff9bc908487a9bf8d548c6dad8bb3d", size = 10530649 },
-    { url = "https://files.pythonhosted.org/packages/63/82/a733956540bb388f00df5a3e6a02467b16c0e529132625fe44ce4c5fb9c7/ruff-0.8.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ec016beb69ac16be416c435828be702ee694c0d722505f9c1f35e1b9c0cc1bf5", size = 10274069 },
-    { url = "https://files.pythonhosted.org/packages/3d/12/0b3aa14d1d71546c988a28e1b412981c1b80c8a1072e977a2f30c595cc4a/ruff-0.8.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f05cdf8d050b30e2ba55c9b09330b51f9f97d36d4673213679b965d25a785f3c", size = 9909400 },
-    { url = "https://files.pythonhosted.org/packages/23/08/f9f08cefb7921784c891c4151cce6ed357ff49e84b84978440cffbc87408/ruff-0.8.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60f578c11feb1d3d257b2fb043ddb47501ab4816e7e221fbb0077f0d5d4e7b6f", size = 10766782 },
-    { url = "https://files.pythonhosted.org/packages/e4/71/bf50c321ec179aa420c8ec40adac5ae9cc408d4d37283a485b19a2331ceb/ruff-0.8.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbd5cf9b0ae8f30eebc7b360171bd50f59ab29d39f06a670b3e4501a36ba5897", size = 10286316 },
-    { url = "https://files.pythonhosted.org/packages/f2/83/c82688a2a6117539aea0ce63fdf6c08e60fe0202779361223bcd7f40bd74/ruff-0.8.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b402ddee3d777683de60ff76da801fa7e5e8a71038f57ee53e903afbcefdaa58", size = 11338270 },
-    { url = "https://files.pythonhosted.org/packages/7f/d7/bc6a45e5a22e627640388e703160afb1d77c572b1d0fda8b4349f334fc66/ruff-0.8.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:705832cd7d85605cb7858d8a13d75993c8f3ef1397b0831289109e953d833d29", size = 12058579 },
-    { url = "https://files.pythonhosted.org/packages/da/3b/64150c93946ec851e6f1707ff586bb460ca671581380c919698d6a9267dc/ruff-0.8.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32096b41aaf7a5cc095fa45b4167b890e4c8d3fd217603f3634c92a541de7248", size = 11615172 },
-    { url = "https://files.pythonhosted.org/packages/e4/9e/cf12b697ea83cfe92ec4509ae414dc4c9b38179cc681a497031f0d0d9a8e/ruff-0.8.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e769083da9439508833cfc7c23e351e1809e67f47c50248250ce1ac52c21fb93", size = 12882398 },
-    { url = "https://files.pythonhosted.org/packages/a9/27/96d10863accf76a9c97baceac30b0a52d917eb985a8ac058bd4636aeede0/ruff-0.8.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fe716592ae8a376c2673fdfc1f5c0c193a6d0411f90a496863c99cd9e2ae25d", size = 11176094 },
-    { url = "https://files.pythonhosted.org/packages/eb/10/cd2fd77d4a4e7f03c29351be0f53278a393186b540b99df68beb5304fddd/ruff-0.8.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:81c148825277e737493242b44c5388a300584d73d5774defa9245aaef55448b0", size = 10771884 },
-    { url = "https://files.pythonhosted.org/packages/71/5d/beabb2ff18870fc4add05fa3a69a4cb1b1d2d6f83f3cf3ae5ab0d52f455d/ruff-0.8.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d261d7850c8367704874847d95febc698a950bf061c9475d4a8b7689adc4f7fa", size = 10382535 },
-    { url = "https://files.pythonhosted.org/packages/ae/29/6b3fdf3ad3e35b28d87c25a9ff4c8222ad72485ab783936b2b267250d7a7/ruff-0.8.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1ca4e3a87496dc07d2427b7dd7ffa88a1e597c28dad65ae6433ecb9f2e4f022f", size = 10886995 },
-    { url = "https://files.pythonhosted.org/packages/e9/dc/859d889b4d9356a1a2cdbc1e4a0dda94052bc5b5300098647e51a58c430b/ruff-0.8.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:729850feed82ef2440aa27946ab39c18cb4a8889c1128a6d589ffa028ddcfc22", size = 11220750 },
-    { url = "https://files.pythonhosted.org/packages/0b/08/e8f519f61f1d624264bfd6b8829e4c5f31c3c61193bc3cff1f19dbe7626a/ruff-0.8.2-py3-none-win32.whl", hash = "sha256:ac42caaa0411d6a7d9594363294416e0e48fc1279e1b0e948391695db2b3d5b1", size = 8729396 },
-    { url = "https://files.pythonhosted.org/packages/f8/d4/ba1c7ab72aba37a2b71fe48ab95b80546dbad7a7f35ea28cf66fc5cea5f6/ruff-0.8.2-py3-none-win_amd64.whl", hash = "sha256:2aae99ec70abf43372612a838d97bfe77d45146254568d94926e8ed5bbb409ea", size = 9594729 },
-    { url = "https://files.pythonhosted.org/packages/23/34/db20e12d3db11b8a2a8874258f0f6d96a9a4d631659d54575840557164c8/ruff-0.8.2-py3-none-win_arm64.whl", hash = "sha256:fb88e2a506b70cfbc2de6fae6681c4f944f7dd5f2fe87233a7233d888bad73e8", size = 9035131 },
-]
+version = "0.9.5"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/74/6c359f6b9ed85b88df6ef31febce18faeb852f6c9855651dfb1184a46845/ruff-0.9.5.tar.gz", hash = "sha256:11aecd7a633932875ab3cb05a484c99970b9d52606ce9ea912b690b02653d56c", size = 3634177 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/17/4b/82b7c9ac874e72b82b19fd7eab57d122e2df44d2478d90825854f9232d02/ruff-0.9.5-py3-none-linux_armv6l.whl", hash = "sha256:d466d2abc05f39018d53f681fa1c0ffe9570e6d73cde1b65d23bb557c846f442", size = 11681264 },
+    { url = "https://files.pythonhosted.org/packages/27/5c/f5ae0a9564e04108c132e1139d60491c0abc621397fe79a50b3dc0bd704b/ruff-0.9.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38840dbcef63948657fa7605ca363194d2fe8c26ce8f9ae12eee7f098c85ac8a", size = 11657554 },
+    { url = "https://files.pythonhosted.org/packages/2a/83/c6926fa3ccb97cdb3c438bb56a490b395770c750bf59f9bc1fe57ae88264/ruff-0.9.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d56ba06da53536b575fbd2b56517f6f95774ff7be0f62c80b9e67430391eeb36", size = 11088959 },
+    { url = "https://files.pythonhosted.org/packages/af/a7/42d1832b752fe969ffdbfcb1b4cb477cb271bed5835110fb0a16ef31ab81/ruff-0.9.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7cb2a01da08244c50b20ccfaeb5972e4228c3c3a1989d3ece2bc4b1f996001", size = 11902041 },
+    { url = "https://files.pythonhosted.org/packages/53/cf/1fffa09fb518d646f560ccfba59f91b23c731e461d6a4dedd21a393a1ff1/ruff-0.9.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:96d5c76358419bc63a671caac70c18732d4fd0341646ecd01641ddda5c39ca0b", size = 11421069 },
+    { url = "https://files.pythonhosted.org/packages/09/27/bb8f1b7304e2a9431f631ae7eadc35550fe0cf620a2a6a0fc4aa3d736f94/ruff-0.9.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:deb8304636ed394211f3a6d46c0e7d9535b016f53adaa8340139859b2359a070", size = 12625095 },
+    { url = "https://files.pythonhosted.org/packages/d7/ce/ab00bc9d3df35a5f1b64f5117458160a009f93ae5caf65894ebb63a1842d/ruff-0.9.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df455000bf59e62b3e8c7ba5ed88a4a2bc64896f900f311dc23ff2dc38156440", size = 13257797 },
+    { url = "https://files.pythonhosted.org/packages/88/81/c639a082ae6d8392bc52256058ec60f493c6a4d06d5505bccface3767e61/ruff-0.9.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de92170dfa50c32a2b8206a647949590e752aca8100a0f6b8cefa02ae29dce80", size = 12763793 },
+    { url = "https://files.pythonhosted.org/packages/b3/d0/0a3d8f56d1e49af466dc770eeec5c125977ba9479af92e484b5b0251ce9c/ruff-0.9.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d28532d73b1f3f627ba88e1456f50748b37f3a345d2be76e4c653bec6c3e393", size = 14386234 },
+    { url = "https://files.pythonhosted.org/packages/04/70/e59c192a3ad476355e7f45fb3a87326f5219cc7c472e6b040c6c6595c8f0/ruff-0.9.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c746d7d1df64f31d90503ece5cc34d7007c06751a7a3bbeee10e5f2463d52d2", size = 12437505 },
+    { url = "https://files.pythonhosted.org/packages/55/4e/3abba60a259d79c391713e7a6ccabf7e2c96e5e0a19100bc4204f1a43a51/ruff-0.9.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:11417521d6f2d121fda376f0d2169fb529976c544d653d1d6044f4c5562516ee", size = 11884799 },
+    { url = "https://files.pythonhosted.org/packages/a3/db/b0183a01a9f25b4efcae919c18fb41d32f985676c917008620ad692b9d5f/ruff-0.9.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b9d71c3879eb32de700f2f6fac3d46566f644a91d3130119a6378f9312a38e1", size = 11527411 },
+    { url = "https://files.pythonhosted.org/packages/0a/e4/3ebfcebca3dff1559a74c6becff76e0b64689cea02b7aab15b8b32ea245d/ruff-0.9.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2e36c61145e70febcb78483903c43444c6b9d40f6d2f800b5552fec6e4a7bb9a", size = 12078868 },
+    { url = "https://files.pythonhosted.org/packages/ec/b2/5ab808833e06c0a1b0d046a51c06ec5687b73c78b116e8d77687dc0cd515/ruff-0.9.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:2f71d09aeba026c922aa7aa19a08d7bd27c867aedb2f74285a2639644c1c12f5", size = 12524374 },
+    { url = "https://files.pythonhosted.org/packages/e0/51/1432afcc3b7aa6586c480142caae5323d59750925c3559688f2a9867343f/ruff-0.9.5-py3-none-win32.whl", hash = "sha256:134f958d52aa6fdec3b294b8ebe2320a950d10c041473c4316d2e7d7c2544723", size = 9853682 },
+    { url = "https://files.pythonhosted.org/packages/b7/ad/c7a900591bd152bb47fc4882a27654ea55c7973e6d5d6396298ad3fd6638/ruff-0.9.5-py3-none-win_amd64.whl", hash = "sha256:78cc6067f6d80b6745b67498fb84e87d32c6fc34992b52bffefbdae3442967d6", size = 10865744 },
+    { url = "https://files.pythonhosted.org/packages/75/d9/fde7610abd53c0c76b6af72fc679cb377b27c617ba704e25da834e0a0608/ruff-0.9.5-py3-none-win_arm64.whl", hash = "sha256:18a29f1a005bddb229e580795627d297dfa99f16b30c7039e73278cf6b5f9fa9", size = 10064595 },
+]
 
 [[package]]