
Commit 2ad9a6e

reduce the kernel test coverage for simulator (#36)

Signed-off-by: chzhang <[email protected]>

1 parent d66181a · commit 2ad9a6e

10 files changed: 109 additions, 0 deletions

The change adds a pytest_generate_tests hook in tests/conftest.py that, when the environment variable XPU_KERNEL_PYTEST_PROFILER is set to MINI, replaces each test's full parametrize sweeps with reduced per-module MINI_PYTEST_PARAMS profiles, shrinking the kernel test matrix when running on the simulator.

tests/conftest.py

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: Apache-2.0
+import os
+
+
+def pytest_generate_tests(metafunc):
+    use_mini_pytest_profiler = os.getenv("XPU_KERNEL_PYTEST_PROFILER",
+                                         "") == "MINI"
+    if not use_mini_pytest_profiler:
+        return
+
+    module = metafunc.module
+
+    func_pytest_params = getattr(module, "MINI_PYTEST_PARAMS", {})
+    profile = func_pytest_params.get(metafunc.function.__name__, None)
+
+    if not profile:
+        profile = func_pytest_params.get('default', None)
+
+    if not profile:
+        return
+
+    for param_name, values in profile.items():
+        if param_name in metafunc.fixturenames:
+            new_markers = []
+            for mark in metafunc.definition.own_markers:
+                if mark.name == "parametrize" and mark.args[0] != param_name:
+                    new_markers.append(mark)
+            metafunc.definition.own_markers = new_markers
+            metafunc.parametrize(param_name, values)
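The hook runs at collection time: for each parameter named in the active profile, it strips the test's own parametrize marker for that parameter and re-parametrizes with the reduced values, while parameters absent from the profile keep their full sweeps. A minimal sketch of the effect on a hypothetical test module (not part of this commit):

# hypothetical tests/test_example.py, illustrating the hook above
import pytest

# picked up by pytest_generate_tests when the environment variable
# XPU_KERNEL_PYTEST_PROFILER=MINI is set
MINI_PYTEST_PARAMS = {
    "default": {
        "num_tokens": [1],  # replaces the four-value sweep below
    },
}


@pytest.mark.parametrize("num_tokens", [1, 7, 83, 2048])
@pytest.mark.parametrize("dtype", ["float16", "bfloat16"])
def test_example(num_tokens, dtype):
    assert num_tokens >= 1

With the variable unset, pytest collects 4 × 2 = 8 cases; with XPU_KERNEL_PYTEST_PROFILER=MINI it collects 1 × 2 = 2, because dtype does not appear in the profile and keeps its full sweep.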

tests/test_activation.py

Lines changed: 8 additions & 0 deletions

@@ -15,6 +15,14 @@
     f"xpu:{i}" for i in range(1 if torch.xpu.device_count() == 1 else 2)
 ]
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "num_tokens": [1],
+        "d": [128],
+    },
+}
+
 
 @pytest.mark.parametrize("activation",
                          ["silu_and_mul", "mul_and_silu", "gelu", "gelu_tanh"])

tests/test_cache.py

Lines changed: 16 additions & 0 deletions

@@ -36,6 +36,22 @@
 
 KV_CACHE_DTYPE = ["auto"]  # FIXME: will add "fp8" when accuracy is improved
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "num_tokens": [1],
+        "head_size": [64, 80],
+    },
+    "test_concat_and_cache_mla": {
+        "num_tokens": [1],
+        "num_blocks": [32],
+    },
+    "test_gather_cache_mla": {
+        "num_blocks": [32],
+        "max_seq_len": [64],
+    },
+}
+
 
 @pytest.mark.parametrize("num_tokens", NUM_TOKENS)
 @pytest.mark.parametrize("num_heads", NUM_HEADS)

tests/test_deepseek_scaling_rope.py

Lines changed: 7 additions & 0 deletions

@@ -6,6 +6,13 @@
 
 DEVICE = torch.device("xpu")
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "batch": [1],
+    },
+}
+
 
 def _rotate_neox(x):
     x1 = x[..., :x.shape[-1] // 2]

tests/test_fp8_quant.py

Lines changed: 8 additions & 0 deletions

@@ -129,6 +129,14 @@ def seed_everything(seed):
 SEEDS = [0]
 FP8_DTYPES = [torch.float8_e5m2, torch.float8_e4m3fn]
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "num_tokens": [1, 7, 83],
+        "hidden_size": [1, 2, 3, 4, 16],
+    },
+}
+
 
 @pytest.mark.parametrize("num_tokens", NUM_TOKENS)
 @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)

tests/test_grouped_topk.py

Lines changed: 7 additions & 0 deletions

@@ -6,6 +6,13 @@
     fused_grouped_topk_sycl, grouped_topk)
 from tests.utils import seed_everything
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "n_hidden": [128, 256],
+    },
+}
+
 
 @pytest.mark.parametrize("n_token", [1, 33, 64])
 @pytest.mark.parametrize("n_hidden", [1024, 2048])

tests/test_layernorm.py

Lines changed: 8 additions & 0 deletions

@@ -17,6 +17,14 @@
     f"xpu:{i}" for i in range(1 if torch.xpu.device_count() == 1 else 2)
 ]
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "num_tokens": [7],
+        "hidden_size": [8],
+    },
+}
+
 
 @pytest.mark.parametrize("num_tokens", NUM_TOKENS)
 @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)

tests/test_moe_sum.py

Lines changed: 8 additions & 0 deletions

@@ -12,6 +12,14 @@
 
 TOP_KS = [2, 6]
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "m": [1, 33],
+        "k": [128, 256],
+    },
+}
+
 
 @pytest.mark.parametrize("m", [1, 33, 64, 222])
 @pytest.mark.parametrize("topk", TOP_KS)

tests/test_rotary_embedding.py

Lines changed: 10 additions & 0 deletions

@@ -31,6 +31,16 @@ def rotary_embedding_opcheck(rot,
                       rot.is_neox_style))
 
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "max_position": [11, 256],
+        "head_size": [32],
+        "seq_len": [11, 128],
+    },
+}
+
+
 @pytest.mark.parametrize("device", ["xpu"])
 @pytest.mark.parametrize("max_position", [11, 4096, 32768])
 @pytest.mark.parametrize("is_neox_style", [True, False])

tests/test_swigluoai_and_mul.py

Lines changed: 8 additions & 0 deletions

@@ -13,6 +13,14 @@
     f"xpu:{i}" for i in range(1 if torch.xpu.device_count() == 1 else 2)
 ]
 
+# override pytest parameters when mini pytest is enabled
+MINI_PYTEST_PARAMS = {
+    "default": {
+        "num_tokens": [1, 7],
+        "d": [32, 64],
+    },
+}
+
 default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float: 1e-5}
 default_rtol = {
     torch.float16: 1e-3,
