[Bugfix][Core] Fix tekken edge case for mistral tokenizer #8640

Merged: 12 commits, Sep 20, 2024
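The edge case being fixed: Tekken-style tokenizers can emit raw byte pieces, so a single multi-byte UTF-8 character (typical for Japanese or Chinese text) may be split across several tokens. Joining those pieces as strings produces the Unicode replacement character "�" instead of the original text; concatenating the underlying bytes first and decoding once recovers it. A minimal standard-library sketch of the failure mode (the particular byte split is hypothetical, chosen only for illustration):

# Illustration only: pretend a byte-level tokenizer split one character across pieces.
text = "船"                  # one CJK character, three bytes in UTF-8
raw = text.encode("utf-8")   # b'\xe8\x88\xb9'
pieces = [raw[:2], raw[2:]]  # hypothetical byte pieces emitted by the tokenizer

# Decoding each piece on its own cannot recover the character.
per_piece = "".join(p.decode("utf-8", errors="replace") for p in pieces)
assert "�" in per_piece

# Decoding the concatenated bytes does recover it, which is what this PR
# arranges for by going through the tokenizer's own decode.
assert b"".join(pieces).decode("utf-8") == text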
30 changes: 29 additions & 1 deletion tests/models/decoder_only/language/test_mistral.py
@@ -5,6 +5,7 @@
import pytest

from vllm import SamplingParams
from vllm import LLM

from ...utils import check_logprobs_close

@@ -16,6 +17,10 @@
]

SAMPLING_PARAMS = SamplingParams(max_tokens=512, temperature=0.0, logprobs=5)
SYMBOLIC_LANG_PROMPTS = [
    "勇敢な船乗りについての詩を書く",  # Japanese: "Write a poem about a brave sailor"
    "寫一首關於勇敢的水手的詩",  # Chinese: "Write a poem about a brave sailor"
]

# for function calling
TOOLS = [{
@@ -77,7 +82,7 @@ def test_models(
with hf_runner(model, dtype=dtype) as hf_model:
hf_outputs = hf_model.generate_greedy_logprobs_limit(
example_prompts, max_tokens, num_logprobs)

with vllm_runner(model, dtype=dtype,
tokenizer_mode="mistral") as vllm_model:
vllm_outputs = vllm_model.generate_greedy_logprobs(
@@ -131,6 +136,29 @@ def test_mistral_format(
)


@pytest.mark.parametrize("model", MODELS[1:])
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [64])
@pytest.mark.parametrize("num_logprobs", [5])
@pytest.mark.parametrize("prompt", SYMBOLIC_LANG_PROMPTS)
def test_mistral_symbolic_languages(
vllm_runner,
model: str,
dtype: str,
max_tokens: int,
num_logprobs: int,
prompt: str,
) -> None:
msg = {"role": "user", "content": prompt}
llm = LLM(model=model,
tokenizer_mode="mistral",
config_format="mistral",
load_format="mistral")
outputs = llm.chat([msg], sampling_params=SAMPLING_PARAMS)
assert "�" not in outputs[0].outputs[0].text.strip()


@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("model", MODELS[1:]) # v1 can't do func calling
def test_mistral_function_calling(
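The prompts driving test_mistral_symbolic_languages are entirely multi-byte UTF-8, which is exactly the input the tokenizer change below targets. A quick standard-library check of that property (illustration only, not part of the test suite):

# Every character in the new prompts needs more than one UTF-8 byte, so a
# byte-level tokenizer can end up splitting a character across pieces.
for prompt in ["勇敢な船乗りについての詩を書く", "寫一首關於勇敢的水手的詩"]:
    assert all(len(ch.encode("utf-8")) > 1 for ch in prompt)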
32 changes: 29 additions & 3 deletions vllm/transformers_utils/tokenizers/mistral.py
@@ -175,10 +175,29 @@ def apply_chat_template(self,

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        if isinstance(self.tokenizer, Tekkenizer):
-            return "".join(t for t in tokens
-                           if t not in self.tokenizer._all_special_tokens)
+            tokens = [
+                t for t in tokens
+                if t not in self.tokenizer._all_special_tokens
+            ]
+
+            if any(isinstance(t, bytes) for t in tokens):
+                # we need to encode and decode all tokens again
+                shift = self.tokenizer.num_special_tokens
+                byte_tokens = [
+                    t.encode("utf-8") if not isinstance(t, bytes) else t
+                    for t in tokens
+                ]
+                ids = [
+                    self.tokenizer._tekken_token2id_nospecial[t] + shift
+                    for t in byte_tokens
+                ]
+                decoded = self.tokenizer.decode(ids)
+            else:
+                decoded = "".join(tokens)
        else:
-            return self.tokenizer.decode(tokens)  # type: ignore[arg-type]
+            decoded = self.tokenizer.decode(tokens)  # type: ignore[arg-type]
+
+        return decoded

def decode(self, ids: Union[List[int], int]) -> str:
if isinstance(ids, int):
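The key move in the Tekkenizer branch above is to map byte pieces back to token ids and let the tokenizer decode them in one call, instead of joining the pieces as Python strings. A toy stand-in (not the vLLM or mistral_common implementation; the class and its two-entry vocabulary are invented for illustration) of that idea:

class ToyByteTokenizer:
    """Hypothetical byte-level tokenizer, for illustration only."""

    def __init__(self, pieces):
        self._id_of = {p: i for i, p in enumerate(pieces)}
        self._piece_of = dict(enumerate(pieces))

    def decode(self, ids):
        # Concatenate the raw bytes first, then decode once.
        return b"".join(self._piece_of[i] for i in ids).decode("utf-8")


tok = ToyByteTokenizer([b"\xe8\x88", b"\xb9"])
tokens = [b"\xe8\x88", b"\xb9"]        # byte pieces of "船"
ids = [tok._id_of[t] for t in tokens]  # map each piece back to its id
assert tok.decode(ids) == "船"         # the character is reassembled correctly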
@@ -200,4 +219,11 @@ def convert_ids_to_tokens(
self.tokenizer)

tokens = [self.tokenizer.id_to_piece(id) for id in ids]

if any(t.strip() == "�" for t in tokens):
# if any stripped decoded token is undefined
# because it's invalid unicode then pass bytes
# See: https://github.com/vllm-project/vllm/pull/8640
tokens = [self.tokenizer.id_to_byte_piece(id) for id in ids]

return tokens
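The fallback above uses the Unicode replacement character "�" as the signal that a piece was not valid UTF-8 on its own (decoding invalid or truncated bytes with errors="replace" yields exactly that character), and then re-fetches every piece as raw bytes via id_to_byte_piece so that convert_tokens_to_string can reassemble them. A one-line standard-library illustration of the signal:

# A truncated UTF-8 sequence decodes to the replacement character.
assert b"\xe8\x88".decode("utf-8", errors="replace").strip() == "�"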