From 0ce00adfdb295e85f49a22fdfaf452846e44bdbe Mon Sep 17 00:00:00 2001 From: George Ohashi Date: Thu, 6 Mar 2025 14:40:02 -0500 Subject: [PATCH] merge main --- src/llmcompressor/__init__.py | 4 +--- src/llmcompressor/core/__init__.py | 4 ---- src/llmcompressor/transformers/finetune/session_mixin.py | 4 ++-- src/llmcompressor/transformers/finetune/text_generation.py | 2 +- .../finetune/test_finetune_no_recipe_custom_dataset.py | 2 +- .../transformers/finetune/test_finetune_without_recipe.py | 2 +- .../transformers/finetune/test_oneshot_then_finetune.py | 3 +-- tests/llmcompressor/transformers/finetune/test_safetensors.py | 2 +- tests/llmcompressor/transformers/test_clear_ml.py | 2 +- 9 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/llmcompressor/__init__.py b/src/llmcompressor/__init__.py index f979a7453..e65cf51b3 100644 --- a/src/llmcompressor/__init__.py +++ b/src/llmcompressor/__init__.py @@ -38,8 +38,6 @@ active_session, callbacks, create_session, - finalize, - initialize, reset_session, ) -from llmcompressor.entrypoints import Oneshot, oneshot +from llmcompressor.entrypoints import Oneshot, oneshot, train diff --git a/src/llmcompressor/core/__init__.py b/src/llmcompressor/core/__init__.py index 47e710943..85a074869 100644 --- a/src/llmcompressor/core/__init__.py +++ b/src/llmcompressor/core/__init__.py @@ -13,8 +13,6 @@ active_session, callbacks, create_session, - finalize, - initialize, reset_session, ) from llmcompressor.core.state import Data, Hardware, ModifiedState, State @@ -35,8 +33,6 @@ "create_session", "active_session", "reset_session", - "initialize", - "finalize", "apply", "callbacks", "LifecycleCallbacks", diff --git a/src/llmcompressor/transformers/finetune/session_mixin.py b/src/llmcompressor/transformers/finetune/session_mixin.py index 7b2697f41..20d9ae510 100644 --- a/src/llmcompressor/transformers/finetune/session_mixin.py +++ b/src/llmcompressor/transformers/finetune/session_mixin.py @@ -11,7 +11,7 @@ from 
transformers.trainer_callback import TrainerState from transformers.trainer_utils import get_last_checkpoint -from llmcompressor.core import active_session, callbacks, create_session, finalize +from llmcompressor.core import active_session, callbacks, create_session from llmcompressor.metrics import LoggerManager from llmcompressor.modifiers.distillation.utils.pytorch.model_wrapper import ( KDModelWrapper, @@ -182,7 +182,7 @@ def finalize_session(self): with summon_full_params_context(self.model, offload_to_cpu=True): # in order to update each layer we need to gathers all its parameters - finalize() + active_session().finalize() logger.info("Finalized LLM Compressor session") model = get_session_model() self.model = model diff --git a/src/llmcompressor/transformers/finetune/text_generation.py b/src/llmcompressor/transformers/finetune/text_generation.py index c27acc602..680b2fc4f 100644 --- a/src/llmcompressor/transformers/finetune/text_generation.py +++ b/src/llmcompressor/transformers/finetune/text_generation.py @@ -49,7 +49,7 @@ def oneshot(**kwargs) -> None: @deprecated( message=( - "`from llmcompressor.transformers import train` is deprecated, " + "`from llmcompressor.transformers import train` is deprecated, " "please use `from llmcompressor import train`." 
) ) diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py index 37524069c..2195ae4e6 100644 --- a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py +++ b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py @@ -18,7 +18,7 @@ class TestFinetuneNoRecipeCustomDataset(unittest.TestCase): def _test_finetune_wout_recipe_custom_dataset(self): - from llmcompressor.transformers import train + from llmcompressor import train dataset_path = Path(tempfile.mkdtemp()) diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py b/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py index 7facd088e..42eb495d8 100644 --- a/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py +++ b/tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py @@ -20,7 +20,7 @@ def setUp(self): self.output = "./finetune_output" def test_finetune_without_recipe(self): - from llmcompressor.transformers import train + from llmcompressor import train recipe_str = None device = "cuda:0" diff --git a/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py b/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py index e8e0ae426..ec68e1f5d 100644 --- a/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py +++ b/tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py @@ -6,10 +6,9 @@ from transformers import AutoModelForCausalLM from transformers.utils.quantization_config import CompressedTensorsConfig -from llmcompressor import oneshot +from llmcompressor import oneshot, train from llmcompressor.core import create_session from llmcompressor.modifiers.quantization import QuantizationModifier -from llmcompressor.transformers import train @pytest.mark.unit diff --git 
a/tests/llmcompressor/transformers/finetune/test_safetensors.py b/tests/llmcompressor/transformers/finetune/test_safetensors.py index 84c1bf1b2..462c529e6 100644 --- a/tests/llmcompressor/transformers/finetune/test_safetensors.py +++ b/tests/llmcompressor/transformers/finetune/test_safetensors.py @@ -22,7 +22,7 @@ def setUp(self): self.output = Path("./finetune_output") def test_safetensors(self): - from llmcompressor.transformers import train + from llmcompressor import train device = "cuda:0" output_dir = self.output / "output1" diff --git a/tests/llmcompressor/transformers/test_clear_ml.py b/tests/llmcompressor/transformers/test_clear_ml.py index 4a7922a66..94abd1a62 100644 --- a/tests/llmcompressor/transformers/test_clear_ml.py +++ b/tests/llmcompressor/transformers/test_clear_ml.py @@ -10,7 +10,7 @@ except Exception: is_clearml = False -from llmcompressor.transformers import train +from llmcompressor import train @pytest.mark.skipif(not is_clearml, reason="clearML not installed")