Commit 413f96f

youkaichao authored and kwang1012 committed
[beam search] add output for manually checking the correctness (vllm-project#8684)
1 parent 35d5b4b · commit 413f96f

File tree

1 file changed: +10 -3 lines changed

tests/samplers/test_beam_search.py (+10 -3)

@@ -11,7 +11,7 @@
 # 3. Use the model "huggyllama/llama-7b".
 MAX_TOKENS = [128]
 BEAM_WIDTHS = [4]
-MODELS = ["facebook/opt-125m"]
+MODELS = ["TinyLlama/TinyLlama-1.1B-Chat-v1.0"]
 
 
 @pytest.mark.parametrize("model", MODELS)
@@ -37,8 +37,15 @@ def test_beam_search_single_input(
                                                        beam_width, max_tokens)
 
     for i in range(len(example_prompts)):
-        hf_output_ids, _ = hf_outputs[i]
-        vllm_output_ids, _ = vllm_outputs[i]
+        hf_output_ids, hf_output_texts = hf_outputs[i]
+        vllm_output_ids, vllm_output_texts = vllm_outputs[i]
+        for i, (hf_text,
+                vllm_text) in enumerate(zip(hf_output_texts,
+                                            vllm_output_texts)):
+            print(f">>>{i}-th hf output:")
+            print(hf_text)
+            print(f">>>{i}-th vllm output:")
+            print(vllm_text)
         assert len(hf_output_ids) == len(vllm_output_ids)
         for j in range(len(hf_output_ids)):
             assert hf_output_ids[j] == vllm_output_ids[j], (
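
The added print() calls only help with manual checking if pytest's output capturing is disabled. Below is a minimal sketch, not part of the commit, of running just this test with capturing turned off (pytest's "-s" flag, short for "--capture=no") so the hf and vllm beam texts can be compared by eye; it assumes execution from the repository root.

# Hypothetical launcher script (not in the vLLM repo): run the beam-search
# test with stdout capturing disabled so the printed hf/vllm outputs are
# visible. Equivalent to: pytest -s tests/samplers/test_beam_search.py
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["-s", "tests/samplers/test_beam_search.py"]))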
