-
-
Notifications
You must be signed in to change notification settings - Fork 5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Frontend] Add Early Validation For Chat Template / Tool Call Parser #9151
Merged
Merged
Changes from all commits
Commits
Show all changes
5 commits
Select commit
Hold shift + click to select a range
ea4a3b5
Add chat util validation func
alex-jw-brooks 2a5fe23
Add parsed arg validation
alex-jw-brooks 69a7d49
Add fast fail to api server entrypoint
alex-jw-brooks 9cc4745
Add validation to serve script
alex-jw-brooks 77f3bd1
Refactor cli parsing tests
alex-jw-brooks File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,91 +1,131 @@ | ||
import json | ||
import unittest | ||
|
||
from vllm.entrypoints.openai.cli_args import make_arg_parser | ||
import pytest | ||
|
||
from vllm.entrypoints.openai.cli_args import (make_arg_parser, | ||
validate_parsed_serve_args) | ||
from vllm.entrypoints.openai.serving_engine import LoRAModulePath | ||
from vllm.utils import FlexibleArgumentParser | ||
|
||
from ...utils import VLLM_PATH | ||
|
||
# Shared fixture data used across the LoRA-parsing tests below:
# a LoRA module spec in its JSON form, plus a known-good chat template
# shipped with the vLLM examples (used by the chat-template validation tests).
LORA_MODULE = {
    "name": "module2",
    "path": "/path/to/module2",
    "base_model_name": "llama"
}
CHATML_JINJA_PATH = VLLM_PATH / "examples/template_chatml.jinja"
assert CHATML_JINJA_PATH.exists()
|
||
|
||
class TestLoraParserAction(unittest.TestCase): | ||
@pytest.fixture
def serve_parser():
    """Build the fully-configured CLI parser for the OpenAI-compatible server."""
    base_parser = FlexibleArgumentParser(
        description="vLLM's remote OpenAI server.")
    return make_arg_parser(base_parser)
|
||
def setUp(self): | ||
# Setting up argparse parser for tests | ||
parser = FlexibleArgumentParser( | ||
description="vLLM's remote OpenAI server.") | ||
self.parser = make_arg_parser(parser) | ||
|
||
def test_valid_key_value_format(self): | ||
# Test old format: name=path | ||
args = self.parser.parse_args([ | ||
'--lora-modules', | ||
'module1=/path/to/module1', | ||
### Tests for Lora module parsing
def test_valid_key_value_format(serve_parser):
    """The legacy name=path form of --lora-modules parses correctly."""
    parsed = serve_parser.parse_args([
        '--lora-modules',
        'module1=/path/to/module1',
    ])
    assert parsed.lora_modules == [
        LoRAModulePath(name='module1', path='/path/to/module1')
    ]
|
||
|
||
def test_valid_json_format(serve_parser):
    """The JSON form of --lora-modules parses correctly."""
    parsed = serve_parser.parse_args([
        '--lora-modules',
        json.dumps(LORA_MODULE),
    ])
    assert parsed.lora_modules == [
        LoRAModulePath(name='module2',
                       path='/path/to/module2',
                       base_model_name='llama')
    ]
|
||
|
||
def test_invalid_json_format(serve_parser):
    """Malformed JSON (missing closing brace) aborts argument parsing."""
    with pytest.raises(SystemExit):
        serve_parser.parse_args([
            '--lora-modules', '{"name": "module3", "path": "/path/to/module3"'
        ])
expected = [LoRAModulePath(name='module1', path='/path/to/module1')] | ||
self.assertEqual(args.lora_modules, expected) | ||
|
||
def test_valid_json_format(self): | ||
# Test valid JSON format input | ||
args = self.parser.parse_args([ | ||
|
||
def test_invalid_type_error(serve_parser):
    """A value that is neither JSON nor key=value aborts argument parsing."""
    with pytest.raises(SystemExit):
        serve_parser.parse_args([
            '--lora-modules',
            'invalid_format'  # Neither JSON nor key=value format
        ])
expected = [ | ||
LoRAModulePath(name='module2', | ||
path='/path/to/module2', | ||
base_model_name='llama') | ||
] | ||
self.assertEqual(args.lora_modules, expected) | ||
|
||
def test_invalid_json_format(self): | ||
# Test invalid JSON format input, missing closing brace | ||
with self.assertRaises(SystemExit): | ||
self.parser.parse_args([ | ||
'--lora-modules', | ||
'{"name": "module3", "path": "/path/to/module3"' | ||
]) | ||
|
||
def test_invalid_type_error(self): | ||
# Test type error when values are not JSON or key=value | ||
with self.assertRaises(SystemExit): | ||
self.parser.parse_args([ | ||
'--lora-modules', | ||
'invalid_format' # This is not JSON or key=value format | ||
]) | ||
|
||
def test_invalid_json_field(self): | ||
# Test valid JSON format but missing required fields | ||
with self.assertRaises(SystemExit): | ||
self.parser.parse_args([ | ||
'--lora-modules', | ||
'{"name": "module4"}' # Missing required 'path' field | ||
]) | ||
|
||
def test_empty_values(self): | ||
# Test when no LoRA modules are provided | ||
args = self.parser.parse_args(['--lora-modules', '']) | ||
self.assertEqual(args.lora_modules, []) | ||
|
||
def test_multiple_valid_inputs(self): | ||
# Test multiple valid inputs (both old and JSON format) | ||
args = self.parser.parse_args([ | ||
|
||
|
||
def test_invalid_json_field(serve_parser):
    """Well-formed JSON missing a required field aborts argument parsing."""
    with pytest.raises(SystemExit):
        serve_parser.parse_args([
            '--lora-modules',
            '{"name": "module4"}'  # Missing required 'path' field
        ])
expected = [ | ||
LoRAModulePath(name='module1', path='/path/to/module1'), | ||
LoRAModulePath(name='module2', | ||
path='/path/to/module2', | ||
base_model_name='llama') | ||
] | ||
self.assertEqual(args.lora_modules, expected) | ||
|
||
|
||
if __name__ == '__main__': | ||
unittest.main() | ||
def test_empty_values(serve_parser):
    """An empty --lora-modules value yields an empty module list."""
    parsed = serve_parser.parse_args(['--lora-modules', ''])
    assert parsed.lora_modules == []
|
||
|
||
def test_multiple_valid_inputs(serve_parser):
    """Mixing the legacy key=value form and the JSON form works in one call."""
    parsed = serve_parser.parse_args([
        '--lora-modules',
        'module1=/path/to/module1',
        json.dumps(LORA_MODULE),
    ])
    assert parsed.lora_modules == [
        LoRAModulePath(name='module1', path='/path/to/module1'),
        LoRAModulePath(name='module2',
                       path='/path/to/module2',
                       base_model_name='llama'),
    ]
|
||
|
||
### Tests for serve argument validation that run prior to loading
def test_enable_auto_choice_passes_without_tool_call_parser(serve_parser):
    """Ensure validation fails if tool choice is enabled with no call parser"""
    # NOTE(review): the name says "passes" but the expectation is a failure;
    # consider renaming to ..._fails_without_tool_call_parser.
    parsed_args = serve_parser.parse_args(args=["--enable-auto-tool-choice"])
    with pytest.raises(TypeError):
        validate_parsed_serve_args(parsed_args)
|
||
|
||
def test_enable_auto_choice_passes_with_tool_call_parser(serve_parser):
    """Ensure validation passes with tool choice enabled with a call parser"""
    parsed_args = serve_parser.parse_args(args=[
        "--enable-auto-tool-choice",
        "--tool-call-parser",
        "mistral",
    ])
    # Should not raise.
    validate_parsed_serve_args(parsed_args)
|
||
|
||
def test_chat_template_validation_for_happy_paths(serve_parser):
    """Ensure validation passes if the chat template exists"""
    parsed_args = serve_parser.parse_args(
        args=["--chat-template",
              CHATML_JINJA_PATH.absolute().as_posix()])
    # Should not raise.
    validate_parsed_serve_args(parsed_args)
|
||
|
||
def test_chat_template_validation_for_sad_paths(serve_parser):
    """Ensure validation fails if the chat template doesn't exist"""
    parsed_args = serve_parser.parse_args(
        args=["--chat-template", "does/not/exist"])
    with pytest.raises(ValueError):
        validate_parsed_serve_args(parsed_args)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The only new tests are the bottom 4 — I pulled the setup into a fixture and removed the test class, since that's how most of the tests I've seen in vLLM have been written, and IIRC subclassing from
unittest.TestCase
is an issue for things like pytest.mark.parametrize
, which I could see being nice for validation tests in the future. Happy to put it back and move the tests into the class if there is a reason they were written this way though!