Commit

[beam search] add output for manually checking the correctness (vllm-…
youkaichao authored and MengqingCao committed Sep 30, 2024
1 parent d84fe51 commit a7d8b79
Showing 1 changed file with 10 additions and 3 deletions.
13 changes: 10 additions & 3 deletions tests/samplers/test_beam_search.py
@@ -11,7 +11,7 @@
 # 3. Use the model "huggyllama/llama-7b".
 MAX_TOKENS = [128]
 BEAM_WIDTHS = [4]
-MODELS = ["facebook/opt-125m"]
+MODELS = ["TinyLlama/TinyLlama-1.1B-Chat-v1.0"]
 
 
 @pytest.mark.parametrize("model", MODELS)
@@ -37,8 +37,15 @@ def test_beam_search_single_input(
                                             beam_width, max_tokens)
 
     for i in range(len(example_prompts)):
-        hf_output_ids, _ = hf_outputs[i]
-        vllm_output_ids, _ = vllm_outputs[i]
+        hf_output_ids, hf_output_texts = hf_outputs[i]
+        vllm_output_ids, vllm_output_texts = vllm_outputs[i]
+        for i, (hf_text,
+                vllm_text) in enumerate(zip(hf_output_texts,
+                                            vllm_output_texts)):
+            print(f">>>{i}-th hf output:")
+            print(hf_text)
+            print(f">>>{i}-th vllm output:")
+            print(vllm_text)
         assert len(hf_output_ids) == len(vllm_output_ids)
         for j in range(len(hf_output_ids)):
            assert hf_output_ids[j] == vllm_output_ids[j], (
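For readers who want to see the changed loop in isolation, below is a minimal, self-contained sketch of the comparison-plus-printout pattern this diff introduces. The data shapes are assumptions mirroring what the test's generate_beam_search helpers appear to return (per prompt: a list of beam token-id sequences plus the matching list of decoded texts); the function and variable names here are hypothetical, not the real hf_runner/vllm_runner fixtures.

    # A minimal sketch, under the assumed data shapes described above.
    from typing import List, Tuple

    # Per prompt: (list of beam token-id sequences, list of beam texts).
    BeamOutput = Tuple[List[List[int]], List[str]]


    def compare_beam_outputs(hf_outputs: List[BeamOutput],
                             vllm_outputs: List[BeamOutput]) -> None:
        for i, ((hf_ids, hf_texts),
                (vllm_ids, vllm_texts)) in enumerate(zip(hf_outputs,
                                                         vllm_outputs)):
            # Print both decoded texts so a human can eyeball near-miss
            # beams even before the strict token-id assertion fires.
            for k, (hf_text, vllm_text) in enumerate(zip(hf_texts,
                                                         vllm_texts)):
                print(f">>>{k}-th hf output:")
                print(hf_text)
                print(f">>>{k}-th vllm output:")
                print(vllm_text)
            # Exact token-id equality remains the pass/fail criterion.
            assert len(hf_ids) == len(vllm_ids)
            for j, (hf_beam, vllm_beam) in enumerate(zip(hf_ids, vllm_ids)):
                assert hf_beam == vllm_beam, (
                    f"Test{i} output{j}:\nHF: {hf_beam}\nvLLM: {vllm_beam}")


    if __name__ == "__main__":
        # Tiny fake data: one prompt, one beam; identical outputs pass.
        fake = [([[101, 7, 42]], ["a tiny beam"])]
        compare_beam_outputs(fake, fake)

One detail worth noting: the committed loop reuses i as the inner beam index, shadowing the prompt index from the outer loop for the rest of the loop body; the sketch uses a separate k to keep the two indices distinct.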
