
Commit f280743: update tests
dsikka committed Aug 26, 2024
1 parent cb98f34
Showing 11 changed files with 74 additions and 45 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/test-check.yaml
@@ -114,10 +114,14 @@ jobs:
- name: "⚙️ Install dependencies"
id: install
run: pip3 install .[dev]
- name: "🔬 Running transformers tests[skipping over finetune]"
- name: "🔬 Running transformers tests"
if: always() && steps.install.outcome == 'success'
run: |
pytest tests/llmcompressor/transformers/compression -v
- name: Run Finetune Tests
if: always() && steps.install.outcome == 'success'
run: |
pytest -v tests/llmcompressor/transformers/compression -m unit
- name: Running GPTQ Tests
if: always() && steps.install.outcome == 'success'
run: |
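The new "Run Finetune Tests" step selects tests with pytest's `-m unit` marker filter, the same kind of `@pytest.mark.unit` marker this commit adds to the session-mixin tests further down. A minimal sketch of how marker filtering behaves; the test names below are illustrative only, not from the repository:

import pytest


@pytest.mark.unit
def test_quick_check():
    # Collected by `pytest -m unit`; skipped by `pytest -m integration`.
    assert 1 + 1 == 2


@pytest.mark.integration
def test_end_to_end():
    # Collected only when the integration marker is selected,
    # e.g. `pytest -m integration`, or by an unfiltered `pytest` run.
    assert True

Custom markers such as `unit` and `integration` are normally registered in pytest configuration (a `markers` section) so that marker-filtered runs do not warn about unknown markers.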
@@ -0,0 +1,4 @@
cadence: "nightly"
test_type: "regression"
model: "Xenova/llama2.c-stories15M"
dataset: open_platypus
@@ -0,0 +1,5 @@
cadence: "nightly"
test_type: "regression"
model: "Xenova/llama2.c-stories15M"
dataset_config_name: wikitext-2-raw-v1
dataset: wikitext
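The two new config files above drive the parameterized finetune tests below: `parse_params` (imported from tests.testing_utils) reads every config in a directory, and `parameterized_class` stamps each config's keys onto the test class, which is why the tests can reference `self.model`, `self.dataset`, and `self.dataset_config_name` instead of hard-coded values. A rough sketch of that flow, assuming `parse_params` yields one dict per YAML file; the loader here is an illustrative stand-in, not the repository's implementation:

import os
import unittest

import yaml
from parameterized import parameterized_class

CONFIGS_DIRECTORY = "tests/llmcompressor/transformers/finetune/finetune_generic"


def load_configs(directory):
    # Illustrative stand-in for parse_params: one dict per YAML config file.
    configs = []
    for name in sorted(os.listdir(directory)):
        if name.endswith((".yaml", ".yml")):
            with open(os.path.join(directory, name)) as handle:
                configs.append(yaml.safe_load(handle))
    return configs


@parameterized_class(load_configs(CONFIGS_DIRECTORY))
class TestConfigDriven(unittest.TestCase):
    model = None
    dataset = None

    def test_attributes_populated(self):
        # parameterized_class copies each config's keys onto a generated
        # subclass, so every config file yields its own test class.
        self.assertIsNotNone(self.model)
        self.assertIsNotNone(self.dataset)

Keeping model and dataset selection in YAML lets the nightly cadence point the same test code at different models without touching the Python.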
@@ -3,38 +3,41 @@
from pathlib import Path

import pytest
from parameterized import parameterized_class

from tests.testing_utils import requires_torch
from tests.testing_utils import parse_params, requires_gpu, requires_torch

CONFIGS_DIRECTORY = "tests/llmcompressor/transformers/finetune/finetune_generic"


@pytest.mark.integration
@requires_torch
@requires_gpu
@parameterized_class(parse_params(CONFIGS_DIRECTORY))
class TestOneshotWithModifierObject(unittest.TestCase):
model = None
dataset = None

def setUp(self):
self.output = Path("./finetune_output")

def test_oneshot_with_modifier_object(self):
import torch

from llmcompressor.modifiers.obcq.base import SparseGPTModifier
from llmcompressor.transformers import oneshot

recipe_str = [
SparseGPTModifier(sparsity=0.5, targets=[r"re:model.layers.\d+$"])
]
model = "Xenova/llama2.c-stories15M"

device = "cuda:0"
if not torch.cuda.is_available():
device = "cpu"
dataset = "open_platypus"
concatenate_data = False
num_calibration_samples = 64
output_dir = self.output / "oneshot_out"
splits = {"calibration": "train[:10%]"}

oneshot(
model=model,
dataset=dataset,
model=self.model,
dataset=self.dataset,
output_dir=output_dir,
num_calibration_samples=num_calibration_samples,
recipe=recipe_str,
@@ -2,34 +2,37 @@
import unittest

import pytest
from parameterized import parameterized_class

from tests.testing_utils import requires_torch
from tests.testing_utils import parse_params, requires_gpu, requires_torch

CONFIGS_DIRECTORY = "tests/llmcompressor/transformers/finetune/finetune_generic"


@pytest.mark.integration
@requires_torch
@requires_gpu
@parameterized_class(parse_params(CONFIGS_DIRECTORY))
class TestFinetuneWithoutRecipe(unittest.TestCase):
model = None
dataset = None

def setUp(self):
self.output = "./finetune_output"

def test_finetune_without_recipe(self):
import torch

from llmcompressor.transformers import train

recipe_str = None
model = "Xenova/llama2.c-stories15M"
device = "cuda:0"
if not torch.cuda.is_available():
device = "cpu"
dataset = "open_platypus"

concatenate_data = False
max_steps = 50
splits = "train"

train(
model=model,
dataset=dataset,
model=self.model,
dataset=self.dataset,
output_dir=self.output,
recipe=recipe_str,
max_steps=max_steps,
@@ -2,18 +2,26 @@
import unittest

import pytest
from parameterized import parameterized_class

from tests.testing_utils import requires_torch
from tests.testing_utils import parse_params, requires_gpu, requires_torch

CONFIGS_DIRECTORY = "tests/llmcompressor/transformers/finetune/finetune_tokenizer"


@pytest.mark.integration
@requires_torch
@requires_gpu
@parameterized_class(parse_params(CONFIGS_DIRECTORY))
class TestOneshotAndFinetuneWithTokenizer(unittest.TestCase):
model = None
dataset = None
dataset_config_name = None

def setUp(self):
self.output = "./finetune_output"

def test_oneshot_and_finetune_with_tokenizer(self):
import torch
from datasets import load_dataset
from transformers import AutoTokenizer

@@ -23,27 +31,26 @@ def test_oneshot_and_finetune_with_tokenizer(self):
"tests/llmcompressor/transformers/finetune/test_alternate_recipe.yaml"
)
tokenizer = AutoTokenizer.from_pretrained(
"Xenova/llama2.c-stories15M",
self.model,
)
device = "cuda:0"
if not torch.cuda.is_available():
device = "cpu"
model = SparseAutoModelForCausalLM.from_pretrained(
"Xenova/llama2.c-stories15M", device_map=device
model_loaded = SparseAutoModelForCausalLM.from_pretrained(
self.model, device_map=device
)

dataset_config_name = "wikitext-2-raw-v1"
dataset = load_dataset("wikitext", dataset_config_name, split="train[:50%]")
dataset_loaded = load_dataset(
self.dataset, self.dataset_config_name, split="train[:50%]"
)

concatenate_data = True
run_stages = True
max_steps = 50
splits = {"train": "train[:50%]", "calibration": "train[50%:60%]"}

compress(
model=model,
dataset=dataset,
dataset_config_name=dataset_config_name,
model=model_loaded,
dataset=dataset_loaded,
dataset_config_name=self.dataset_config_name,
run_stages=run_stages,
output_dir=self.output,
recipe=recipe_str,
23 changes: 12 additions & 11 deletions tests/llmcompressor/transformers/finetune/test_safetensors.py
@@ -4,34 +4,35 @@
from pathlib import Path

import pytest
from parameterized import parameterized_class

from tests.testing_utils import requires_torch
from tests.testing_utils import parse_params, requires_gpu, requires_torch

CONFIGS_DIRECTORY = "tests/llmcompressor/transformers/finetune/finetune_generic"


@pytest.mark.integration
@requires_torch
@requires_gpu
@parameterized_class(parse_params(CONFIGS_DIRECTORY))
class TestSafetensors(unittest.TestCase):
model = None
dataset = None

def setUp(self):
self.output = Path("./finetune_output")

def test_safetensors(self):
import torch

from llmcompressor.transformers import train

model = "Xenova/llama2.c-stories15M"
device = "cuda:0"
if not torch.cuda.is_available():
device = "cpu"

dataset = "open_platypus"
output_dir = self.output / "output1"
max_steps = 10
splits = {"train": "train[:10%]"}

train(
model=model,
dataset=dataset,
model=self.model,
dataset=self.dataset,
output_dir=output_dir,
max_steps=max_steps,
splits=splits,
@@ -45,7 +46,7 @@ def test_safetensors(self):
new_output_dir = self.output / "output2"
train(
model=output_dir,
dataset=dataset,
dataset=self.dataset,
output_dir=new_output_dir,
max_steps=max_steps,
splits=splits,
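The assertions at the end of test_safetensors are collapsed above; a check of this kind typically just inspects what the two training runs wrote to disk. The snippet below is purely illustrative and is not the test's actual body:

from pathlib import Path

output_dir = Path("./finetune_output/output1")

# Expect safetensors shards from the first run and no legacy torch pickle.
assert list(output_dir.glob("*.safetensors"))
assert not (output_dir / "pytorch_model.bin").exists()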
@@ -26,6 +26,7 @@ def __init__(
)


@pytest.mark.unit
def test_mixin_init():
model_state_path = "Xenova/llama2.c-stories15M"
model = AutoModelForCausalLM.from_pretrained(model_state_path)
@@ -54,6 +55,7 @@ def mixin_trainer():
)


@pytest.mark.unit
def test_mixin_session_init(mixin_trainer):
mixin_trainer.initialize_session(epoch=0.0, checkpoint=None)
session = active_session()
@@ -3,6 +3,6 @@ test_type: "regression"
model: "meta-llama/Llama-2-7b-hf"
dataset: open_platypus
recipe: "tests/llmcompressor/transformers/obcq/recipes/quant.yaml"
device: "auto"
device: "cuda:0"
num_samples: 512
perplexity: 20
@@ -3,6 +3,6 @@ test_type: "regression"
model: "meta-llama/Llama-2-7b-hf"
dataset: open_platypus
recipe: "tests/llmcompressor/transformers/obcq/recipes/quant_and_sparse.yaml"
device: "auto"
device: "cuda:0"
num_samples: 512
perplexity: 20
@@ -3,6 +3,6 @@ test_type: "regression"
model: "meta-llama/Llama-2-7b-hf"
dataset: open_platypus
recipe: "tests/llmcompressor/transformers/obcq/recipes/sparse.yaml"
device: "auto"
device: "cuda:0"
num_samples: 512
perplexity: 20
