Update test_ignore_eos (vllm-project#4898)
simon-mo authored Jun 2, 2024
1 parent 044793d commit ed59a7e
Showing 1 changed file with 11 additions and 10 deletions.
tests/samplers/test_ignore_eos.py (11 additions, 10 deletions)
@@ -7,25 +7,26 @@
 
 from vllm import SamplingParams
 
-MODELS = ["facebook/opt-125m"]
+# We also test with llama because it has generation_config to specify EOS
+# (past regression).
+MODELS = ["facebook/opt-125m", "meta-llama/Llama-2-7b-hf"]
 
 
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
-@pytest.mark.parametrize("max_tokens", [1024])
-def test_beam_search_single_input(
+@pytest.mark.parametrize("max_tokens", [512])
+def test_ignore_eos(
     vllm_runner,
     example_prompts,
     model: str,
     dtype: str,
     max_tokens: int,
 ) -> None:
-    example_prompts = "1 + 1 is"
-
     vllm_model = vllm_runner(model, dtype=dtype)
     sampling_params = SamplingParams(max_tokens=max_tokens, ignore_eos=True)
-    ignore_eos_output = vllm_model.model.generate(
-        example_prompts, sampling_params=sampling_params)
-    print(len(ignore_eos_output[0].outputs[0].token_ids))
-    assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) < 10
-    assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) >= 0
+
+    for prompt in example_prompts:
+        ignore_eos_output = vllm_model.model.generate(
+            prompt, sampling_params=sampling_params)
+        output_length = len(ignore_eos_output[0].outputs[0].token_ids)
+        assert output_length == max_tokens
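For context, the updated test relies on SamplingParams(ignore_eos=True) forcing generation to run for exactly max_tokens tokens. A minimal standalone sketch of the same behavior, using the offline vllm.LLM API instead of the test's vllm_runner fixture (the model name and prompt here are illustrative, not taken from the commit):

from vllm import LLM, SamplingParams

# Illustrative model and prompt; the test parametrizes over MODELS instead.
llm = LLM(model="facebook/opt-125m")
sampling_params = SamplingParams(max_tokens=512, ignore_eos=True)

# With ignore_eos=True the sampler never stops on the EOS token,
# so each completion should contain exactly max_tokens generated tokens.
outputs = llm.generate(["1 + 1 is"], sampling_params)
assert len(outputs[0].outputs[0].token_ids) == 512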
