Skip to content

Commit a0f95b0

Browse files
committed
Change name
Signed-off-by: Rafael Vasquez <[email protected]>
1 parent 68db9c1 commit a0f95b0

File tree

2 files changed: +6 −6 lines changed

tests/e2e/test_spyre_cb.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ def test_long_context_batches(
188188
max_tokens = 10
189189

190190
# (batch_size, prompt_length) pairs
191-
batch_prompt_pairs = [
191+
batch_token_pairs = [
192192
(32, 512),
193193
(16, 1500),
194194
(8, 3000),
@@ -215,8 +215,8 @@ def test_long_context_batches(
215215
logprobs=0,
216216
)
217217

218-
for batch_size, prompt_len in batch_prompt_pairs:
219-
prompt = create_seq_prompt(model, min_token_length=prompt_len)
218+
for batch_size, token_len in batch_token_pairs:
219+
prompt = create_seq_prompt(model, token_length=token_len)
220220
prompts = [prompt] * batch_size
221221

222222
vllm_outputs = vllm_model.generate(prompts, sampling_params)

tests/e2e/test_spyre_static_batching_limits.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,9 @@ def test_max_prompt_len_and_new_tokens(model: str,
3838
# Craft a request with a prompt that is slightly too long for the warmup
3939
# shape
4040
prompt = create_text_prompt(model,
41-
min_tokens=max_prompt_length,
42-
max_tokens=max_prompt_length + max_new_tokens -
43-
1)
41+
min_token_length=max_prompt_length,
42+
max_token_length=max_prompt_length +
43+
max_new_tokens - 1)
4444
sampling_params = SamplingParams(max_tokens=1)
4545

4646
with pytest.raises(ValueError, match="warmup"):

0 commit comments

Comments (0)