From 07726ef3d317107385622cfc99fb10420fa7ce9e Mon Sep 17 00:00:00 2001
From: Dipika Sikka
Date: Wed, 5 Mar 2025 09:52:19 -0500
Subject: [PATCH] Remove MonkeyPatch for GPUs (#1227)

SUMMARY:
- The MonkeyPatch was added previously to work around an error we were
  seeing with multiple GPUs (i.e. having more than one GPU visible would
  cause an error)
- Verified this error no longer happens, so the workaround can be removed

---
 .../finetune/test_finetune_no_recipe_custom_dataset.py   | 7 -------
 .../finetune/test_oneshot_and_finetune_with_tokenizer.py | 4 ----
 2 files changed, 11 deletions(-)

diff --git a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
index f8f8d9827..37524069c 100644
--- a/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
+++ b/tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py
@@ -108,7 +108,6 @@ def create_mock_file(self, extension, content, path, filename):
 
     def tearDown(self):
         shutil.rmtree(self.output)
-        self.monkeypatch.undo()
 
 
 @pytest.mark.integration
@@ -121,11 +120,8 @@ class TestOneshotCustomDatasetSmall(TestFinetuneNoRecipeCustomDataset):
     def setUp(self):
         import torch
 
-        self.monkeypatch = pytest.MonkeyPatch()
-
         if torch.cuda.is_available():
             self.device = "cuda:0"
-            self.monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")
         else:
             self.device = "cpu"
 
@@ -147,15 +143,12 @@ def setUp(self):
         import torch
         from transformers import AutoModelForCausalLM
 
-        self.monkeypatch = pytest.MonkeyPatch()
         self.device = "cuda:0"
         self.output = "./oneshot_output"
-        self.monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")
 
         self.model = AutoModelForCausalLM.from_pretrained(
             self.model, device_map=self.device, torch_dtype=torch.bfloat16
         )
-        self.monkeypatch = pytest.MonkeyPatch()
 
     def test_oneshot_then_finetune_gpu(self):
         self._test_finetune_wout_recipe_custom_dataset()
diff --git a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
index 509464a34..45b25818b 100644
--- a/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
+++ b/tests/llmcompressor/transformers/finetune/test_oneshot_and_finetune_with_tokenizer.py
@@ -21,7 +21,6 @@ def setUp(self):
         self.output = "./finetune_output"
         # finetune workflows in general seem to have trouble with multi-gpus
         # use just one atm
-        self.monkeypatch = pytest.MonkeyPatch()
 
     def test_oneshot_and_finetune_with_tokenizer(self):
         from datasets import load_dataset
@@ -28,8 +27,6 @@ def test_oneshot_and_finetune_with_tokenizer(self):
 
         from llmcompressor.transformers import compress
 
-        self.monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")
-
         recipe_str = (
             "tests/llmcompressor/transformers/finetune/test_alternate_recipe.yaml"
         )
@@ -71,4 +68,3 @@ def test_oneshot_and_finetune_with_tokenizer(self):
 
     def tearDown(self):
         shutil.rmtree(self.output)
-        self.monkeypatch.undo()
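
For reviewers unfamiliar with the pattern being deleted, below is a minimal sketch of how pytest.MonkeyPatch is typically used in unittest-style tests to pin a process to one GPU. The class and test names here are illustrative, not from this repo; only pytest.MonkeyPatch, setenv, and undo are real pytest API.

# Minimal sketch of the unittest + pytest.MonkeyPatch pattern this patch removes.
# pytest.MonkeyPatch is instantiated directly because the `monkeypatch` fixture
# cannot be injected into unittest.TestCase methods. Names are illustrative only.
import os
import unittest

import pytest


class ExampleSingleGPUTest(unittest.TestCase):
    def setUp(self):
        # Pin the test process to a single visible GPU.
        self.monkeypatch = pytest.MonkeyPatch()
        self.monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")

    def test_env_is_pinned(self):
        assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"

    def tearDown(self):
        # Restore the pre-test environment so later tests see all GPUs again.
        self.monkeypatch.undo()


if __name__ == "__main__":
    unittest.main()

Because setUp/tearDown pairs like this must stay balanced, removing the setenv call also removes the matching undo calls, which is why the diff touches tearDown in both files.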