From 7f92511044a1dda083b57b53a968925d1c2c3d42 Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Thu, 12 Sep 2024 09:48:44 -0700
Subject: [PATCH 1/3] patch

---
 vllm/engine/arg_utils.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index 6f58c39162087..3262f5b81b86d 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -843,6 +843,13 @@ def create_engine_config(self) -> EngineConfig:
         device_config = DeviceConfig(device=self.device)
         model_config = self.create_model_config()
 
+        if model_config.is_multimodal_model:
+            if self.enable_prefix_caching:
+                logger.warning(
+                    "--enable-prefix-caching is currently not "
+                    "supported for multimodal models and has been disabled.")
+            self.enable_prefix_caching = False
+
         cache_config = CacheConfig(
             block_size=self.block_size if self.device != "neuron" else
             self.max_model_len,  # neuron needs block_size = max_model_len
@@ -874,7 +881,10 @@ def create_engine_config(self) -> EngineConfig:
             # If not explicitly set, enable chunked prefill by default for
             # long context (> 32K) models. This is to avoid OOM errors in the
             # initial memory profiling phase.
-            if use_long_context:
+
+            # Chunked prefill is currently disabled for multimodal models by
+            # default.
+            if use_long_context and model_config.is_multimodal_model:
                 is_gpu = device_config.device_type == "cuda"
                 use_sliding_window = (model_config.get_sliding_window()
                                       is not None)

From ce7d366b400d6852f9f433f6f7c784f83b834156 Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Thu, 12 Sep 2024 10:42:58 -0700
Subject: [PATCH 2/3] address typo

---
 vllm/engine/arg_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index 3262f5b81b86d..b5eba9ca3727a 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -884,7 +884,7 @@ def create_engine_config(self) -> EngineConfig:
 
             # Chunked prefill is currently disabled for multimodal models by
             # default.
-            if use_long_context and model_config.is_multimodal_model:
+            if use_long_context and not model_config.is_multimodal_model:
                 is_gpu = device_config.device_type == "cuda"
                 use_sliding_window = (model_config.get_sliding_window()
                                       is not None)

From cb965dbb7641db6b290c4b0b215bb63a399c7ba6 Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Thu, 12 Sep 2024 10:43:51 -0700
Subject: [PATCH 3/3] preserve alphabetical order of list of models

---
 vllm/model_executor/models/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py
index 2c01eb380c375..250f75b639a5b 100644
--- a/vllm/model_executor/models/__init__.py
+++ b/vllm/model_executor/models/__init__.py
@@ -90,12 +90,12 @@
     "PaliGemmaForConditionalGeneration": ("paligemma",
                                           "PaliGemmaForConditionalGeneration"),
     "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
-    "UltravoxModel": ("ultravox", "UltravoxModel"),
-    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
     "PixtralForConditionalGeneration": ("pixtral",
                                         "PixtralForConditionalGeneration"),
+    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
     "Qwen2VLForConditionalGeneration": ("qwen2_vl",
                                         "Qwen2VLForConditionalGeneration"),
+    "UltravoxModel": ("ultravox", "UltravoxModel"),
 }
 _CONDITIONAL_GENERATION_MODELS = {
     "BartModel": ("bart", "BartForConditionalGeneration"),
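
Note (illustrative, not part of the patch series): a minimal sketch of how the behavior changed by this series can be exercised through EngineArgs.create_engine_config(). The model name and argument values below are placeholders chosen for the example, not taken from the patches, and the code assumes an environment where vLLM can load the model's config.

# Sketch only: assumes vLLM with this series applied and a multimodal model
# whose config can be loaded ("llava-hf/llava-1.5-7b-hf" is a placeholder).
from vllm.engine.arg_utils import EngineArgs

engine_args = EngineArgs(
    model="llava-hf/llava-1.5-7b-hf",   # assumed multimodal model
    enable_prefix_caching=True,          # should trigger the new warning
)

# With the series applied, this call is expected to log the warning added in
# PATCH 1/3, force enable_prefix_caching back to False, and skip the
# long-context chunked-prefill default for multimodal models (PATCH 2/3
# fixes the inverted condition from PATCH 1/3).
engine_config = engine_args.create_engine_config()
assert engine_args.enable_prefix_caching is False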