diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py
index b0c0f4ad2f47e..2f18701870a6f 100644
--- a/vllm/entrypoints/openai/run_batch.py
+++ b/vllm/entrypoints/openai/run_batch.py
@@ -58,7 +58,7 @@ async def read_file(path_or_url: str) -> str:
                    session.get(path_or_url) as resp:
             return await resp.text()
     else:
-        with open(path_or_url, "r") as f:
+        with open(path_or_url, "r", encoding="utf-8") as f:
             return f.read()
 
 
@@ -71,7 +71,7 @@ async def write_file(path_or_url: str, data: str) -> None:
         # We should make this async, but as long as this is always run as a
         # standalone program, blocking the event loop won't effect performance
         # in this particular case.
-        with open(path_or_url, "w") as f:
+        with open(path_or_url, "w", encoding="utf-8") as f:
             f.write(data)
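
For context on why the encoding is pinned: without an explicit encoding argument, open() falls back to the locale's preferred encoding (often cp1252 on Windows), so batch files containing non-ASCII text can raise UnicodeDecodeError or come back as mojibake. A minimal, self-contained sketch of the failure mode the patch guards against; the file path and JSON line below are hypothetical and only for illustration:

    import locale
    import os
    import tempfile

    # Hypothetical batch input path, only for illustration.
    path = os.path.join(tempfile.gettempdir(), "batch_input.jsonl")

    # An OpenAI-style batch line containing non-ASCII text, written as UTF-8.
    with open(path, "w", encoding="utf-8") as f:
        f.write('{"custom_id": "req-1", "body": {"prompt": "héllo wörld"}}\n')

    print("locale default encoding:", locale.getpreferredencoding(False))

    # Explicit encoding, as in the patch: decodes correctly on any platform.
    with open(path, "r", encoding="utf-8") as f:
        print(f.read().strip())

    # Simulating a non-UTF-8 locale default (e.g. a latin-1/cp1252 system)
    # shows the corruption that an unpinned open() can produce.
    with open(path, "r", encoding="latin-1") as f:
        print(f.read().strip())  # "héllo" comes back as "hÃ©llo"

Passing encoding="utf-8" at both the write and read sites, as the diff does, keeps the behavior identical on systems whose locale default already is UTF-8 and fixes it everywhere else.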