From b2ec858fcb0a25c8bd0394c03e2d7a6ad579a965 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 29 Aug 2024 21:28:01 -0700 Subject: [PATCH 01/19] Monkeypatch for Qwen2-VL --- setup.py | 6 +- src/liger_kernel/transformers/__init__.py | 1 + .../transformers/model/qwen2_vl.py | 158 ++++++++++++++++++ src/liger_kernel/transformers/monkey_patch.py | 48 ++++++ test/convergence/test_mini_models.py | 76 ++++++++- .../convergence/test_mini_models_no_logits.py | 77 ++++++++- test/transformers/test_auto_model.py | 55 ++++++ test/transformers/test_monkey_patch.py | 1 + 8 files changed, 411 insertions(+), 11 deletions(-) create mode 100644 src/liger_kernel/transformers/model/qwen2_vl.py diff --git a/setup.py b/setup.py index 94aa4d10f..5ba382c24 100644 --- a/setup.py +++ b/setup.py @@ -28,9 +28,9 @@ keywords="triton,kernels,LLM training,deep learning,Hugging Face,PyTorch,GPU optimization", include_package_data=True, install_requires=[ - "torch>=2.1.2", - "triton>=2.3.0", - "transformers>=4.42.0", + "torch>=2.4.0", + "triton>=3.0.0", + "transformers @ git+https://github.com/huggingface/transformers.git", ], extras_require={ "dev": [ diff --git a/src/liger_kernel/transformers/__init__.py b/src/liger_kernel/transformers/__init__.py index ef34bd27e..156fd3edd 100644 --- a/src/liger_kernel/transformers/__init__.py +++ b/src/liger_kernel/transformers/__init__.py @@ -9,4 +9,5 @@ apply_liger_kernel_to_mixtral, apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, + apply_liger_kernel_to_qwen2_vl, ) diff --git a/src/liger_kernel/transformers/model/qwen2_vl.py b/src/liger_kernel/transformers/model/qwen2_vl.py new file mode 100644 index 000000000..80648aab1 --- /dev/null +++ b/src/liger_kernel/transformers/model/qwen2_vl.py @@ -0,0 +1,158 @@ +from typing import List, Optional, Tuple, Union + +import torch +from torch.nn import CrossEntropyLoss +from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + _CONFIG_FOR_DOC, + QWEN2_VL_INPUTS_DOCSTRING, + Qwen2VLCausalLMOutputWithPast +) +from transformers.utils import ( + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) + +from liger_kernel.transformers.fused_linear_cross_entropy import ( + LigerFusedLinearCrossEntropyLoss, +) + + +@add_start_docstrings_to_model_forward(QWEN2_VL_INPUTS_DOCSTRING) +@replace_return_docstrings(output_type=Qwen2VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) +def lce_forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, +) -> Union[Tuple, Qwen2VLCausalLMOutputWithPast]: + r""" + Copy paste Qwen2VL's forward but replace torch cross entropy with liger fused linear cross entropy + + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, Qwen2VLForConditionalGeneration + + >>> model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") + >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") + + >>> messages = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": "What is shown in this image?"}, + ], + }, + ] + >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos]) + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is None: + inputs_embeds = self.model.embed_tokens(input_ids) + if pixel_values is not None: + pixel_values = pixel_values.type(self.visual.get_dtype()) + image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw).to(inputs_embeds.device) + image_mask = input_ids == self.config.image_token_id + if self.training: + inputs_embeds = inputs_embeds.clone() + inputs_embeds[image_mask] = image_embeds + if pixel_values_videos is not None: + pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype()) + video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw).to(inputs_embeds.device) + video_mask = input_ids == self.config.video_token_id + inputs_embeds[video_mask] = video_embeds + if attention_mask is not None: + attention_mask = attention_mask.to(inputs_embeds.device) + + outputs = self.model( + input_ids=None, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + loss = None + logits = None + + if self.training and (labels is not None): + shift_hidden_states = hidden_states[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + + # Flatten tokens + shift_hidden_states = shift_hidden_states.view(-1, self.config.hidden_size) + shift_labels = shift_labels.view(-1) + + lce = LigerFusedLinearCrossEntropyLoss() + loss = lce(self.lm_head.weight, shift_hidden_states, shift_labels) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = 
shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return Qwen2VLCausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + rope_deltas=rope_deltas, + ) \ No newline at end of file diff --git a/src/liger_kernel/transformers/monkey_patch.py b/src/liger_kernel/transformers/monkey_patch.py index ab80668cf..90611799c 100644 --- a/src/liger_kernel/transformers/monkey_patch.py +++ b/src/liger_kernel/transformers/monkey_patch.py @@ -10,6 +10,7 @@ from liger_kernel.transformers.model.phi3 import lce_forward as phi3_lce_forward from liger_kernel.transformers.model.qwen2 import lce_forward as qwen2_lce_forward from liger_kernel.transformers.rms_norm import LigerRMSNorm +from liger_kernel.transformers.layer_norm import LigerLayerNorm from liger_kernel.transformers.rope import liger_rotary_pos_emb from liger_kernel.transformers.swiglu import ( LigerBlockSparseTop2MLP, @@ -233,6 +234,52 @@ def apply_liger_kernel_to_qwen2( modeling_qwen2.Qwen2MLP = LigerSwiGLUMLP +def apply_liger_kernel_to_qwen2_vl( + cross_entropy: bool = False, + fused_linear_cross_entropy: bool = True, + rms_norm: bool = True, + layer_norm: bool = True, + swiglu: bool = True, +) -> None: + """ + Apply Liger kernels to replace original implementation in HuggingFace Qwen2-VL models + + Args: + cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False. + fused_linear_cross_entropy (bool): + Whether to apply Liger's fused linear cross entropy loss. Default is True. + `cross_entropy` and `fused_linear_cross_entropy` cannot both be True. + If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient. + rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True. + layer_norm (bool): Whether to apply Liger's LayerNorm. Default is True. + swiglu (bool): Whether to apply Liger's SwiGLU MLP. Default is True. + """ + assert not ( + cross_entropy and fused_linear_cross_entropy + ), "cross_entropy and fused_linear_cross_entropy cannot both be True." + + from transformers.models.qwen2_vl import modeling_qwen2_vl + + # Qwen2 VL isnt supported in the lower versions of transformers that + # liger_kernel supports so we need to shield all qwen2_vl imports + from liger_kernel.transformers.model.qwen2_vl import lce_forward as qwen2_vl_lce_forward + + # Qwen2 VL has two rope implementations, neither of which is like liger_rotary_pos_emb + # if rope: + # modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = ... + # modeling_qwen2_vl.apply_rotary_pos_emb_vision = ... 
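The assignments just below rebind symbols at module level inside `transformers.models.qwen2_vl.modeling_qwen2_vl`, so they only take effect for models constructed after the patch runs. A minimal usage sketch (the checkpoint name follows the docstring example above; the dtype choice is illustrative):

```python
import torch
from transformers import Qwen2VLForConditionalGeneration

from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl

# Patch first: with the default fused_linear_cross_entropy=True, the patched
# forward computes the loss from hidden states and lm_head.weight directly,
# never materializing the full (batch, seq_len, vocab_size) logits tensor
# during training.
apply_liger_kernel_to_qwen2_vl()

# Models instantiated after the call pick up the Liger modules.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct", torch_dtype=torch.bfloat16
)
```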
+ if rms_norm: + modeling_qwen2_vl.Qwen2RMSNorm = LigerRMSNorm + if layer_norm: + modeling_qwen2_vl.LayerNorm = LigerLayerNorm + if cross_entropy: + modeling_qwen2_vl.CrossEntropyLoss = LigerCrossEntropyLoss + if fused_linear_cross_entropy: + modeling_qwen2_vl.Qwen2VLForConditionalGeneration.forward = qwen2_vl_lce_forward + if swiglu: + modeling_qwen2_vl.Qwen2MLP = LigerSwiGLUMLP + + def apply_liger_kernel_to_phi3( rope: bool = True, cross_entropy: bool = False, @@ -279,6 +326,7 @@ def apply_liger_kernel_to_phi3( "mistral": apply_liger_kernel_to_mistral, "mixtral": apply_liger_kernel_to_mixtral, "qwen2": apply_liger_kernel_to_qwen2, + "qwen2_vl": apply_liger_kernel_to_qwen2_vl, "phi3": apply_liger_kernel_to_phi3, } diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 95c832e15..0882d7dea 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -20,6 +20,8 @@ from transformers.models.mixtral import MixtralConfig, MixtralForCausalLM from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM +from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig +from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -29,6 +31,7 @@ apply_liger_kernel_to_mixtral, apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, + apply_liger_kernel_to_qwen2_vl, ) torch.use_deterministic_algorithms(True) @@ -253,6 +256,50 @@ attn_implementation="sdpa", # default value, pytorch native attention ), ), + "mini_qwen2_vl": MiniModelConfig( + liger_kernel_patch_func=functools.partial( + apply_liger_kernel_to_qwen2_vl, fused_linear_cross_entropy=False + ), + model_class=Qwen2VLForConditionalGeneration, + mini_model_config=Qwen2VLConfig( + attention_dropout=0.0, + bos_token_id=1, # 151643 + eos_token_id=2, # 151645 + hidden_act="silu", + hidden_size=1536, # 8192 + initializer_range=0.02, + intermediate_size=4864, # 29568 + max_position_embeddings=32768, + max_window_layers=4, # 80 + num_attention_heads=12, # 64 + num_hidden_layers=4, # 80 + num_key_value_heads=2, # 8 + rms_norm_eps=1e-6, # 1e-5 + rope_theta=1000000.0, + rope_scaling=dict( + type="mrope", + mrope_section=[16, 24, 24], # (temporal, height, width) + ), + sliding_window=4096, # 4096 + tie_word_embeddings=True, # False + use_cache=True, + vocab_size=32000, # 152064 + use_sliding_window=False, + vision_config={ + "depth": 4, # 32 + "embed_dim": 1280, + "mlp_ratio": 4, + "num_heads": 16, + "in_chans": 3, + "hidden_size": 128, # 1536 + "patch_size": 14, + "spatial_merge_size": 2, + "spatial_patch_size": 14, + "temporal_patch_size": 2 + }, + attn_implementation="sdpa", + ), + ), "mini_phi3": MiniModelConfig( liger_kernel_patch_func=functools.partial( apply_liger_kernel_to_phi3, fused_linear_cross_entropy=False @@ -308,14 +355,22 @@ def run_mini_model( if with_liger is True: kwargs = { - "rope": True, "rms_norm": True, "cross_entropy": True, } + model_supports_rope = "qwen2_vl" not in model_name + if model_supports_rope: + kwargs["rope"] = True + + model_supports_layer_norm = "qwen2_vl" in model_name + if model_supports_layer_norm: + kwargs["layer_norm"] = True + if "gemma" in model_name: kwargs["geglu"] = True else: kwargs["swiglu"] = True + MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs) model = create_model(model_name).to(dtype).to("cuda") @@ -343,7 +398,7 @@ def 
run_mini_model( @pytest.mark.parametrize( "model_name, num_steps, lr, dtype, loss_atol, loss_rtol, logits_atol, logits_rtol, param_atol, param_rtol", [ - # Gemma 1.1 and 2 has more tolerance because currently, the kernel is not a perfect match (casts are not done the same way) + # Gemma 1 has more tolerance because currently, the kernel is not a perfect match (casts are not done the same way) ("mini_gemma1", 32, 1e-4, torch.float32, 1e-8, 6e-4, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( "mini_gemma1", @@ -444,6 +499,23 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), + # A LOT More loss tolerance for qwen2_vl float32 + ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-4, 6e-4, 5e-3, 1e-5, 5e-3, 1e-5), + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.bfloat16, + 1e-8, + 1e-5, + 1e-2, + 1e-5, + 1e-2, + 1e-5, + marks=pytest.mark.skipif( + not supports_bfloat16(), reason="bfloat16 not supported on this GPU" + ), + ), ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( "mini_phi3", diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 3a3272f8d..d53df4203 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -17,6 +17,8 @@ from transformers.models.mistral import MistralConfig, MistralForCausalLM from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM +from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig +from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -25,6 +27,7 @@ apply_liger_kernel_to_mistral, apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, + apply_liger_kernel_to_qwen2_vl, ) MINI_MODEL_SETUPS = { @@ -81,11 +84,49 @@ tie_word_embeddings=True, use_cache=True, vocab_size=32000, # 151936 - # At rope backward - # Eager produces incontiguous dq and dk - # SDPA produces contiguous dq and incontiguous dk - # Flash_attn produces contiguous dq and dk - attn_implementation="sdpa", # default value, pytorch native attention + attn_implementation="sdpa", + ), + ), + "mini_qwen2_vl": MiniModelConfig( + liger_kernel_patch_func=apply_liger_kernel_to_qwen2_vl, + model_class=Qwen2VLForConditionalGeneration, + mini_model_config=Qwen2VLConfig( + attention_dropout=0.0, + bos_token_id=1, # 151643 + eos_token_id=2, # 151645 + hidden_act="silu", + hidden_size=1536, # 8192 + initializer_range=0.02, + intermediate_size=4864, # 29568 + max_position_embeddings=32768, + max_window_layers=4, # 80 + num_attention_heads=12, # 64 + num_hidden_layers=4, # 80 + num_key_value_heads=2, # 8 + rms_norm_eps=1e-6, # 1e-5 + rope_theta=1000000.0, + rope_scaling=dict( + type="mrope", + mrope_section=[16, 24, 24], # (temporal, height, width) + ), + sliding_window=4096, # 4096 + tie_word_embeddings=True, # False + use_cache=True, + vocab_size=32000, # 152064 + use_sliding_window=False, + vision_config={ + "depth": 4, # 32 + "embed_dim": 1280, + "mlp_ratio": 4, + "num_heads": 16, + "in_chans": 3, + "hidden_size": 128, # 1536 + "patch_size": 14, + "spatial_merge_size": 2, + "spatial_patch_size": 14, + "temporal_patch_size": 2 + }, + attn_implementation="sdpa", ), ), "mini_phi3": MiniModelConfig( @@ -230,6 +271,7 @@ def create_model(model_name="mini_llama3"): The commented values are the original values """ 
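For orientation, each convergence case in this file builds the same mini model twice from a fixed seed — once vanilla, once Liger-patched — trains both, and compares the runs. A rough sketch (argument names follow `run_mini_model` as used in this file; the comparison step is paraphrased in the comment):

```python
import torch

# Train with and without Liger kernels; the test then asserts that losses,
# final logits, and parameters agree within the per-case atol/rtol pairs.
expected = run_mini_model("mini_qwen2_vl", num_steps=32, lr=1e-4, dtype=torch.float32)
actual = run_mini_model(
    "mini_qwen2_vl", num_steps=32, lr=1e-4, dtype=torch.float32, with_liger=True
)
```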
model_config = MINI_MODEL_SETUPS[model_name].mini_model_config + print(model_config) model_class = MINI_MODEL_SETUPS[model_name].model_class return model_class(model_config) @@ -250,9 +292,16 @@ def run_mini_model( if with_liger is True: kwargs = { - "rope": True, "rms_norm": True, } + model_supports_rope = "qwen2_vl" not in model_name + if model_supports_rope: + kwargs["rope"] = True + + model_supports_layer_norm = "qwen2_vl" in model_name + if model_supports_layer_norm: + kwargs["layer_norm"] = True + if "gemma" in model_name: kwargs["geglu"] = True else: @@ -323,6 +372,22 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), + ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.bfloat16, + 1e-8, + 1e-5, + 1e-2, + 1e-5, + 1e-2, + 1e-5, + marks=pytest.mark.skipif( + not supports_bfloat16(), reason="bfloat16 not supported on this GPU" + ), + ), ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( "mini_phi3", diff --git a/test/transformers/test_auto_model.py b/test/transformers/test_auto_model.py index 26e5a9d2b..ec3393a98 100644 --- a/test/transformers/test_auto_model.py +++ b/test/transformers/test_auto_model.py @@ -64,3 +64,58 @@ def test_auto_liger_kernel_for_causal_lm_from_pretrained(): pretrained_model_name_or_path, *model_args, **valid_kwargs ) assert model == "mock_model" + + +# def test_auto_liger_kernel_for_conditional_generation_from_pretrained(): +# pretrained_model_name_or_path = "/path/to/qwen2vl/model" +# model_args = ("model_arg1", "model_arg2") + +# valid_kwargs = { +# "valid_arg_1": "some_value_1", +# "valid_arg_2": 10, +# } + +# # This arg should be filtered out as it is not part of the model config +# invalid_kwargs = { +# "invalid_arg": "another_value", +# } + +# # These args should be passed through to apply_liger_kernel_to_qwen2_vl fn +# apply_liger_kernel_kwargs = { +# "rms_norm": False, +# "swiglu": True, +# } + +# kwargs = {**valid_kwargs, **invalid_kwargs, **apply_liger_kernel_kwargs} + +# # Mock the model config instance returned from AutoConfig.from_pretrained() +# mock_model_config = MagicMock() +# mock_model_config.__dict__ = { +# "model_type": "qwen2_vl", +# "valid_arg_1": "", +# "valid_arg_2": 0, +# } +# mock_qwen2_vl = mock.Mock() + +# with patch.dict( +# MODEL_TYPE_TO_APPLY_LIGER_FN, {"qwen2_vl": mock_qwen2_vl} +# ), mock.patch.object( +# AutoConfig, "from_pretrained", return_value=mock_model_config +# ), mock.patch.object( +# AutoModelForCausalLM, "from_pretrained", return_value="mock_model" +# ) as mock_super_from_pretrained: + +# # Mock the function signature of apply_liger_kernel_to_llama +# mock_qwen2_vl.__signature__ = signature(apply_liger_kernel_to_qwen2_vl) + +# model = AutoLigerKernelForConditionalGeneration.from_pretrained( +# pretrained_model_name_or_path, *model_args, **kwargs +# ) + +# # Check that the apply_liger_kernel_to_llama mock was called with the correct kwargs +# mock_qwen2_vl.assert_called_once_with(rms_norm=False, swiglu=True) +# # Check that only valid kwargs are passed to super().from_pretrained +# mock_super_from_pretrained.assert_called_once_with( +# pretrained_model_name_or_path, *model_args, **valid_kwargs +# ) +# assert model == "mock_model" diff --git a/test/transformers/test_monkey_patch.py b/test/transformers/test_monkey_patch.py index 0db1d258a..4116287fd 100644 --- a/test/transformers/test_monkey_patch.py +++ b/test/transformers/test_monkey_patch.py @@ 
-22,6 +22,7 @@ def test_import_from_root(): apply_liger_kernel_to_mixtral, apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, + apply_liger_kernel_to_qwen2_vl, ) except Exception: pytest.fail("Import kernel patch from root fails") From 308b2a830ad9208d5412dc0ee8546256d77ef3fc Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 29 Aug 2024 21:34:05 -0700 Subject: [PATCH 02/19] Checkstyle --- .../transformers/model/qwen2_vl.py | 30 ++++++++++++++----- src/liger_kernel/transformers/monkey_patch.py | 6 ++-- test/convergence/test_mini_models.py | 8 +++-- .../convergence/test_mini_models_no_logits.py | 8 +++-- 4 files changed, 36 insertions(+), 16 deletions(-) diff --git a/src/liger_kernel/transformers/model/qwen2_vl.py b/src/liger_kernel/transformers/model/qwen2_vl.py index 80648aab1..cfb7a905b 100644 --- a/src/liger_kernel/transformers/model/qwen2_vl.py +++ b/src/liger_kernel/transformers/model/qwen2_vl.py @@ -5,7 +5,7 @@ from transformers.models.qwen2_vl.modeling_qwen2_vl import ( _CONFIG_FOR_DOC, QWEN2_VL_INPUTS_DOCSTRING, - Qwen2VLCausalLMOutputWithPast + Qwen2VLCausalLMOutputWithPast, ) from transformers.utils import ( add_start_docstrings_to_model_forward, @@ -18,7 +18,9 @@ @add_start_docstrings_to_model_forward(QWEN2_VL_INPUTS_DOCSTRING) -@replace_return_docstrings(output_type=Qwen2VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) +@replace_return_docstrings( + output_type=Qwen2VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC +) def lce_forward( self, input_ids: torch.LongTensor = None, @@ -79,24 +81,36 @@ def lce_forward( "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..." ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: inputs_embeds = self.model.embed_tokens(input_ids) if pixel_values is not None: pixel_values = pixel_values.type(self.visual.get_dtype()) - image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw).to(inputs_embeds.device) + image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw).to( + inputs_embeds.device + ) image_mask = input_ids == self.config.image_token_id if self.training: inputs_embeds = inputs_embeds.clone() inputs_embeds[image_mask] = image_embeds if pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype()) - video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw).to(inputs_embeds.device) + video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw).to( + inputs_embeds.device + ) video_mask = input_ids == self.config.video_token_id inputs_embeds[video_mask] = video_embeds if attention_mask is not None: @@ -155,4 +169,4 @@ def lce_forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=rope_deltas, - ) \ No newline at end of file + ) diff --git a/src/liger_kernel/transformers/monkey_patch.py 
b/src/liger_kernel/transformers/monkey_patch.py index 90611799c..a79d704c5 100644 --- a/src/liger_kernel/transformers/monkey_patch.py +++ b/src/liger_kernel/transformers/monkey_patch.py @@ -4,13 +4,13 @@ from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss from liger_kernel.transformers.geglu import LigerGEGLUMLP +from liger_kernel.transformers.layer_norm import LigerLayerNorm from liger_kernel.transformers.model.gemma import lce_forward as gemma_lce_forward from liger_kernel.transformers.model.llama import lce_forward as llama_lce_forward from liger_kernel.transformers.model.mistral import lce_forward as mistral_lce_forward from liger_kernel.transformers.model.phi3 import lce_forward as phi3_lce_forward from liger_kernel.transformers.model.qwen2 import lce_forward as qwen2_lce_forward from liger_kernel.transformers.rms_norm import LigerRMSNorm -from liger_kernel.transformers.layer_norm import LigerLayerNorm from liger_kernel.transformers.rope import liger_rotary_pos_emb from liger_kernel.transformers.swiglu import ( LigerBlockSparseTop2MLP, @@ -262,7 +262,9 @@ def apply_liger_kernel_to_qwen2_vl( # Qwen2 VL isnt supported in the lower versions of transformers that # liger_kernel supports so we need to shield all qwen2_vl imports - from liger_kernel.transformers.model.qwen2_vl import lce_forward as qwen2_vl_lce_forward + from liger_kernel.transformers.model.qwen2_vl import ( + lce_forward as qwen2_vl_lce_forward, + ) # Qwen2 VL has two rope implementations, neither of which is like liger_rotary_pos_emb # if rope: diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 0882d7dea..4dafffc9d 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -21,7 +21,9 @@ from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig -from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration +from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, +) from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -274,7 +276,7 @@ num_attention_heads=12, # 64 num_hidden_layers=4, # 80 num_key_value_heads=2, # 8 - rms_norm_eps=1e-6, # 1e-5 + rms_norm_eps=1e-6, # 1e-5 rope_theta=1000000.0, rope_scaling=dict( type="mrope", @@ -295,7 +297,7 @@ "patch_size": 14, "spatial_merge_size": 2, "spatial_patch_size": 14, - "temporal_patch_size": 2 + "temporal_patch_size": 2, }, attn_implementation="sdpa", ), diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index d53df4203..7719c2067 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -18,7 +18,9 @@ from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig -from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration +from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, +) from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -103,7 +105,7 @@ num_attention_heads=12, # 64 num_hidden_layers=4, # 80 num_key_value_heads=2, # 8 - rms_norm_eps=1e-6, # 1e-5 + rms_norm_eps=1e-6, # 1e-5 
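            # mrope sanity check (illustrative aside): the three mrope_section
            # entries (temporal, height, width) sum to head_dim // 2; here
            # head_dim = hidden_size // num_attention_heads = 1536 // 12 = 128,
            # and 16 + 24 + 24 == 64 — the same head_dim as the full-size model.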
rope_theta=1000000.0, rope_scaling=dict( type="mrope", @@ -124,7 +126,7 @@ "patch_size": 14, "spatial_merge_size": 2, "spatial_patch_size": 14, - "temporal_patch_size": 2 + "temporal_patch_size": 2, }, attn_implementation="sdpa", ), From 95b4b1d8bc5d8b932718f1fe74947230f6cae955 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 29 Aug 2024 21:36:07 -0700 Subject: [PATCH 03/19] Revert automodel --- test/transformers/test_auto_model.py | 55 ---------------------------- 1 file changed, 55 deletions(-) diff --git a/test/transformers/test_auto_model.py b/test/transformers/test_auto_model.py index ec3393a98..26e5a9d2b 100644 --- a/test/transformers/test_auto_model.py +++ b/test/transformers/test_auto_model.py @@ -64,58 +64,3 @@ def test_auto_liger_kernel_for_causal_lm_from_pretrained(): pretrained_model_name_or_path, *model_args, **valid_kwargs ) assert model == "mock_model" - - -# def test_auto_liger_kernel_for_conditional_generation_from_pretrained(): -# pretrained_model_name_or_path = "/path/to/qwen2vl/model" -# model_args = ("model_arg1", "model_arg2") - -# valid_kwargs = { -# "valid_arg_1": "some_value_1", -# "valid_arg_2": 10, -# } - -# # This arg should be filtered out as it is not part of the model config -# invalid_kwargs = { -# "invalid_arg": "another_value", -# } - -# # These args should be passed through to apply_liger_kernel_to_qwen2_vl fn -# apply_liger_kernel_kwargs = { -# "rms_norm": False, -# "swiglu": True, -# } - -# kwargs = {**valid_kwargs, **invalid_kwargs, **apply_liger_kernel_kwargs} - -# # Mock the model config instance returned from AutoConfig.from_pretrained() -# mock_model_config = MagicMock() -# mock_model_config.__dict__ = { -# "model_type": "qwen2_vl", -# "valid_arg_1": "", -# "valid_arg_2": 0, -# } -# mock_qwen2_vl = mock.Mock() - -# with patch.dict( -# MODEL_TYPE_TO_APPLY_LIGER_FN, {"qwen2_vl": mock_qwen2_vl} -# ), mock.patch.object( -# AutoConfig, "from_pretrained", return_value=mock_model_config -# ), mock.patch.object( -# AutoModelForCausalLM, "from_pretrained", return_value="mock_model" -# ) as mock_super_from_pretrained: - -# # Mock the function signature of apply_liger_kernel_to_llama -# mock_qwen2_vl.__signature__ = signature(apply_liger_kernel_to_qwen2_vl) - -# model = AutoLigerKernelForConditionalGeneration.from_pretrained( -# pretrained_model_name_or_path, *model_args, **kwargs -# ) - -# # Check that the apply_liger_kernel_to_llama mock was called with the correct kwargs -# mock_qwen2_vl.assert_called_once_with(rms_norm=False, swiglu=True) -# # Check that only valid kwargs are passed to super().from_pretrained -# mock_super_from_pretrained.assert_called_once_with( -# pretrained_model_name_or_path, *model_args, **valid_kwargs -# ) -# assert model == "mock_model" From 956eea0c02e93da23833f19ae3def49a53e7a880 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 16:57:18 -0700 Subject: [PATCH 04/19] wip --- setup.py | 1 + src/liger_kernel/transformers/monkey_patch.py | 10 +++++--- test/convergence/test_mini_models.py | 24 +++++++++--------- .../convergence/test_mini_models_no_logits.py | 25 +++++++++---------- 4 files changed, 31 insertions(+), 29 deletions(-) diff --git a/setup.py b/setup.py index 5ba382c24..c4edf2691 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ "isort>=5.13.2", "pytest>=7.1.2", "datasets>=2.19.2", + "pytest-reverse", ] }, ) diff --git a/src/liger_kernel/transformers/monkey_patch.py b/src/liger_kernel/transformers/monkey_patch.py index a79d704c5..7e8526764 100644 --- a/src/liger_kernel/transformers/monkey_patch.py 
+++ b/src/liger_kernel/transformers/monkey_patch.py @@ -258,20 +258,22 @@ def apply_liger_kernel_to_qwen2_vl( cross_entropy and fused_linear_cross_entropy ), "cross_entropy and fused_linear_cross_entropy cannot both be True." - from transformers.models.qwen2_vl import modeling_qwen2_vl - # Qwen2 VL isnt supported in the lower versions of transformers that # liger_kernel supports so we need to shield all qwen2_vl imports from liger_kernel.transformers.model.qwen2_vl import ( lce_forward as qwen2_vl_lce_forward, ) + from transformers.models.qwen2_vl import modeling_qwen2_vl # Qwen2 VL has two rope implementations, neither of which is like liger_rotary_pos_emb # if rope: # modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = ... # modeling_qwen2_vl.apply_rotary_pos_emb_vision = ... if rms_norm: - modeling_qwen2_vl.Qwen2RMSNorm = LigerRMSNorm + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py#L439 + modeling_qwen2_vl.Qwen2RMSNorm = partial( + LigerRMSNorm, offset=1.0, init_fn="ones", casting_mode="gemma" + ) if layer_norm: modeling_qwen2_vl.LayerNorm = LigerLayerNorm if cross_entropy: @@ -279,7 +281,7 @@ def apply_liger_kernel_to_qwen2_vl( if fused_linear_cross_entropy: modeling_qwen2_vl.Qwen2VLForConditionalGeneration.forward = qwen2_vl_lce_forward if swiglu: - modeling_qwen2_vl.Qwen2MLP = LigerSwiGLUMLP + modeling_qwen2_vl.Qwen2MLP = LigerSwiGLUMLP # issue def apply_liger_kernel_to_phi3( diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 4dafffc9d..264f5aa79 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,13 +1,5 @@ import functools import os -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) import pytest import torch @@ -35,6 +27,14 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) torch.use_deterministic_algorithms(True) @@ -357,8 +357,8 @@ def run_mini_model( if with_liger is True: kwargs = { - "rms_norm": True, - "cross_entropy": True, + "rms_norm": False, + "cross_entropy": False, } model_supports_rope = "qwen2_vl" not in model_name if model_supports_rope: @@ -366,12 +366,12 @@ def run_mini_model( model_supports_layer_norm = "qwen2_vl" in model_name if model_supports_layer_norm: - kwargs["layer_norm"] = True + kwargs["layer_norm"] = False if "gemma" in model_name: kwargs["geglu"] = True else: - kwargs["swiglu"] = True + kwargs["swiglu"] = False MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs) diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 7719c2067..6eb8b09e8 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -1,12 +1,3 @@ -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) - import pytest import torch from datasets import load_from_disk @@ -31,6 +22,14 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) MINI_MODEL_SETUPS = { "mini_llama3": MiniModelConfig( @@ 
-294,7 +293,7 @@ def run_mini_model( if with_liger is True: kwargs = { - "rms_norm": True, + "rms_norm": False, } model_supports_rope = "qwen2_vl" not in model_name if model_supports_rope: @@ -307,12 +306,12 @@ def run_mini_model( if "gemma" in model_name: kwargs["geglu"] = True else: - kwargs["swiglu"] = True + kwargs["swiglu"] = False model_support_flce = "gemma2" not in model_name if model_support_flce: - kwargs["fused_linear_cross_entropy"] = True - kwargs["cross_entropy"] = False + kwargs["fused_linear_cross_entropy"] = False + kwargs["cross_entropy"] = True else: kwargs["cross_entropy"] = True From 43d2fe07ed4bfe114f90fc3ff210b39aadd7832d Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 18:26:57 -0700 Subject: [PATCH 05/19] Passing tests... --- src/liger_kernel/transformers/monkey_patch.py | 4 ++-- test/convergence/test_mini_models.py | 13 ++++++------- test/convergence/test_mini_models_no_logits.py | 11 +++++------ 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/liger_kernel/transformers/monkey_patch.py b/src/liger_kernel/transformers/monkey_patch.py index 7e8526764..01d6e1482 100644 --- a/src/liger_kernel/transformers/monkey_patch.py +++ b/src/liger_kernel/transformers/monkey_patch.py @@ -272,7 +272,7 @@ def apply_liger_kernel_to_qwen2_vl( if rms_norm: # https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py#L439 modeling_qwen2_vl.Qwen2RMSNorm = partial( - LigerRMSNorm, offset=1.0, init_fn="ones", casting_mode="gemma" + LigerRMSNorm, init_fn="ones", casting_mode="gemma" ) if layer_norm: modeling_qwen2_vl.LayerNorm = LigerLayerNorm @@ -281,7 +281,7 @@ def apply_liger_kernel_to_qwen2_vl( if fused_linear_cross_entropy: modeling_qwen2_vl.Qwen2VLForConditionalGeneration.forward = qwen2_vl_lce_forward if swiglu: - modeling_qwen2_vl.Qwen2MLP = LigerSwiGLUMLP # issue + modeling_qwen2_vl.Qwen2MLP = LigerSwiGLUMLP def apply_liger_kernel_to_phi3( diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 264f5aa79..4a5ace725 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -299,7 +299,7 @@ "spatial_patch_size": 14, "temporal_patch_size": 2, }, - attn_implementation="sdpa", + attn_implementation="eager", # fails with sdpa ), ), "mini_phi3": MiniModelConfig( @@ -357,8 +357,8 @@ def run_mini_model( if with_liger is True: kwargs = { - "rms_norm": False, - "cross_entropy": False, + "rms_norm": True, + "cross_entropy": True, } model_supports_rope = "qwen2_vl" not in model_name if model_supports_rope: @@ -366,12 +366,12 @@ def run_mini_model( model_supports_layer_norm = "qwen2_vl" in model_name if model_supports_layer_norm: - kwargs["layer_norm"] = False + kwargs["layer_norm"] = True if "gemma" in model_name: kwargs["geglu"] = True else: - kwargs["swiglu"] = False + kwargs["swiglu"] = True MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs) @@ -501,8 +501,7 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), - # A LOT More loss tolerance for qwen2_vl float32 - ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-4, 6e-4, 5e-3, 1e-5, 5e-3, 1e-5), + ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( "mini_qwen2_vl", 32, diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 6eb8b09e8..dd72e9c54 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ 
b/test/convergence/test_mini_models_no_logits.py @@ -127,7 +127,7 @@ "spatial_patch_size": 14, "temporal_patch_size": 2, }, - attn_implementation="sdpa", + attn_implementation="sdpa", # fails with sdpa ), ), "mini_phi3": MiniModelConfig( @@ -272,7 +272,6 @@ def create_model(model_name="mini_llama3"): The commented values are the original values """ model_config = MINI_MODEL_SETUPS[model_name].mini_model_config - print(model_config) model_class = MINI_MODEL_SETUPS[model_name].model_class return model_class(model_config) @@ -293,7 +292,7 @@ def run_mini_model( if with_liger is True: kwargs = { - "rms_norm": False, + "rms_norm": True, } model_supports_rope = "qwen2_vl" not in model_name if model_supports_rope: @@ -306,12 +305,12 @@ def run_mini_model( if "gemma" in model_name: kwargs["geglu"] = True else: - kwargs["swiglu"] = False + kwargs["swiglu"] = True model_support_flce = "gemma2" not in model_name if model_support_flce: - kwargs["fused_linear_cross_entropy"] = False - kwargs["cross_entropy"] = True + kwargs["fused_linear_cross_entropy"] = True + kwargs["cross_entropy"] = False else: kwargs["cross_entropy"] = True From baae5fdd13a0f217a8f7ee57ac54cdbbb9d08e60 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 20:44:36 -0700 Subject: [PATCH 06/19] Fix tests w/ sdpa --- setup.py | 1 - test/convergence/test_mini_models.py | 6 +++--- test/convergence/test_mini_models_no_logits.py | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index c4edf2691..5ba382c24 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,6 @@ "isort>=5.13.2", "pytest>=7.1.2", "datasets>=2.19.2", - "pytest-reverse", ] }, ) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 4a5ace725..835aac4d6 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -282,8 +282,8 @@ type="mrope", mrope_section=[16, 24, 24], # (temporal, height, width) ), - sliding_window=4096, # 4096 - tie_word_embeddings=True, # False + sliding_window=4096, + tie_word_embeddings=False, use_cache=True, vocab_size=32000, # 152064 use_sliding_window=False, @@ -299,7 +299,7 @@ "spatial_patch_size": 14, "temporal_patch_size": 2, }, - attn_implementation="eager", # fails with sdpa + attn_implementation="sdpa", ), ), "mini_phi3": MiniModelConfig( diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index dd72e9c54..2a2b7a04c 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -110,8 +110,8 @@ type="mrope", mrope_section=[16, 24, 24], # (temporal, height, width) ), - sliding_window=4096, # 4096 - tie_word_embeddings=True, # False + sliding_window=4096, + tie_word_embeddings=False, use_cache=True, vocab_size=32000, # 152064 use_sliding_window=False, @@ -127,7 +127,7 @@ "spatial_patch_size": 14, "temporal_patch_size": 2, }, - attn_implementation="sdpa", # fails with sdpa + attn_implementation="sdpa", ), ), "mini_phi3": MiniModelConfig( From 37ee68531d6f451da9d72fce5f54203c088c866f Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 21:03:51 -0700 Subject: [PATCH 07/19] Shield imports, checkstyle --- src/liger_kernel/transformers/monkey_patch.py | 14 +-- test/convergence/test_mini_models.py | 91 ++++++++------ .../convergence/test_mini_models_no_logits.py | 118 ++++++++++-------- 3 files changed, 120 insertions(+), 103 deletions(-) diff --git a/src/liger_kernel/transformers/monkey_patch.py 
b/src/liger_kernel/transformers/monkey_patch.py index 01d6e1482..97708b3a0 100644 --- a/src/liger_kernel/transformers/monkey_patch.py +++ b/src/liger_kernel/transformers/monkey_patch.py @@ -242,7 +242,8 @@ def apply_liger_kernel_to_qwen2_vl( swiglu: bool = True, ) -> None: """ - Apply Liger kernels to replace original implementation in HuggingFace Qwen2-VL models + Apply Liger kernels to replace original implementation in HuggingFace Qwen2-VL models. + NOTE: Qwen2-VL is not available in transformers<=4.44.2 Args: cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False. @@ -258,17 +259,14 @@ def apply_liger_kernel_to_qwen2_vl( cross_entropy and fused_linear_cross_entropy ), "cross_entropy and fused_linear_cross_entropy cannot both be True." - # Qwen2 VL isnt supported in the lower versions of transformers that - # liger_kernel supports so we need to shield all qwen2_vl imports + from transformers.models.qwen2_vl import modeling_qwen2_vl + from liger_kernel.transformers.model.qwen2_vl import ( lce_forward as qwen2_vl_lce_forward, ) - from transformers.models.qwen2_vl import modeling_qwen2_vl - # Qwen2 VL has two rope implementations, neither of which is like liger_rotary_pos_emb - # if rope: - # modeling_qwen2_vl.apply_multimodal_rotary_pos_emb = ... - # modeling_qwen2_vl.apply_rotary_pos_emb_vision = ... + # TODO: Support Qwen2-VL's multimodal RoPE implementation + if rms_norm: # https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py#L439 modeling_qwen2_vl.Qwen2RMSNorm = partial( diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 835aac4d6..b9ce407c4 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,5 +1,13 @@ import functools import os +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) import pytest import torch @@ -12,10 +20,6 @@ from transformers.models.mixtral import MixtralConfig, MixtralForCausalLM from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM -from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig -from transformers.models.qwen2_vl.modeling_qwen2_vl import ( - Qwen2VLForConditionalGeneration, -) from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -27,14 +31,17 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) + +try: + # Qwen2-VL is only available in transformers>4.44.2 + from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, + ) + + QWEN2_VL_AVAILABLE = True +except ImportError: + QWEN2_VL_AVAILABLE = False torch.use_deterministic_algorithms(True) @@ -258,7 +265,36 @@ attn_implementation="sdpa", # default value, pytorch native attention ), ), - "mini_qwen2_vl": MiniModelConfig( + "mini_phi3": MiniModelConfig( + liger_kernel_patch_func=functools.partial( + apply_liger_kernel_to_phi3, fused_linear_cross_entropy=False + ), + model_class=Phi3ForCausalLM, + mini_model_config=Phi3Config( + attention_dropout=0.0, + bos_token_id=1, + eos_token_id=2, # 32000 + hidden_act="silu", + hidden_size=896, # 3072 + 
initializer_range=0.02, + intermediate_size=4864, # 8192 + max_position_embeddings=4096, + num_attention_heads=8, # 32 + num_hidden_layers=4, # 32 + num_key_value_heads=None, # defaults to num_attention_heads + rms_norm_eps=1e-5, + rope_theta=10000.0, + sliding_window=None, + tie_word_embeddings=False, + use_cache=True, + vocab_size=32064, + attn_implementation="eager", + ), + ), +} + +if QWEN2_VL_AVAILABLE: + MINI_MODEL_SETUPS["mini_qwen2_vl"] = MiniModelConfig( liger_kernel_patch_func=functools.partial( apply_liger_kernel_to_qwen2_vl, fused_linear_cross_entropy=False ), @@ -301,34 +337,7 @@ }, attn_implementation="sdpa", ), - ), - "mini_phi3": MiniModelConfig( - liger_kernel_patch_func=functools.partial( - apply_liger_kernel_to_phi3, fused_linear_cross_entropy=False - ), - model_class=Phi3ForCausalLM, - mini_model_config=Phi3Config( - attention_dropout=0.0, - bos_token_id=1, - eos_token_id=2, # 32000 - hidden_act="silu", - hidden_size=896, # 3072 - initializer_range=0.02, - intermediate_size=4864, # 8192 - max_position_embeddings=4096, - num_attention_heads=8, # 32 - num_hidden_layers=4, # 32 - num_key_value_heads=None, # defaults to num_attention_heads - rms_norm_eps=1e-5, - rope_theta=10000.0, - sliding_window=None, - tie_word_embeddings=False, - use_cache=True, - vocab_size=32064, - attn_implementation="eager", - ), - ), -} + ) def create_model(model_name="mini_llama3"): diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 2a2b7a04c..7c35653f3 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -1,3 +1,12 @@ +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) + import pytest import torch from datasets import load_from_disk @@ -8,10 +17,6 @@ from transformers.models.mistral import MistralConfig, MistralForCausalLM from transformers.models.phi3 import Phi3Config, Phi3ForCausalLM from transformers.models.qwen2 import Qwen2Config, Qwen2ForCausalLM -from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig -from transformers.models.qwen2_vl.modeling_qwen2_vl import ( - Qwen2VLForConditionalGeneration, -) from liger_kernel.transformers import ( apply_liger_kernel_to_gemma, @@ -22,14 +27,17 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) + +try: + # Qwen2-VL is only available in transformers>4.44.2 + from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, + ) + + QWEN2_VL_AVAILABLE = True +except ImportError: + QWEN2_VL_AVAILABLE = False MINI_MODEL_SETUPS = { "mini_llama3": MiniModelConfig( @@ -88,48 +96,6 @@ attn_implementation="sdpa", ), ), - "mini_qwen2_vl": MiniModelConfig( - liger_kernel_patch_func=apply_liger_kernel_to_qwen2_vl, - model_class=Qwen2VLForConditionalGeneration, - mini_model_config=Qwen2VLConfig( - attention_dropout=0.0, - bos_token_id=1, # 151643 - eos_token_id=2, # 151645 - hidden_act="silu", - hidden_size=1536, # 8192 - initializer_range=0.02, - intermediate_size=4864, # 29568 - max_position_embeddings=32768, - max_window_layers=4, # 80 - num_attention_heads=12, # 64 - num_hidden_layers=4, # 80 - num_key_value_heads=2, # 8 - 
rms_norm_eps=1e-6, # 1e-5 - rope_theta=1000000.0, - rope_scaling=dict( - type="mrope", - mrope_section=[16, 24, 24], # (temporal, height, width) - ), - sliding_window=4096, - tie_word_embeddings=False, - use_cache=True, - vocab_size=32000, # 152064 - use_sliding_window=False, - vision_config={ - "depth": 4, # 32 - "embed_dim": 1280, - "mlp_ratio": 4, - "num_heads": 16, - "in_chans": 3, - "hidden_size": 128, # 1536 - "patch_size": 14, - "spatial_merge_size": 2, - "spatial_patch_size": 14, - "temporal_patch_size": 2, - }, - attn_implementation="sdpa", - ), - ), "mini_phi3": MiniModelConfig( liger_kernel_patch_func=apply_liger_kernel_to_phi3, model_class=Phi3ForCausalLM, @@ -265,6 +231,50 @@ ), } +if QWEN2_VL_AVAILABLE: + MINI_MODEL_SETUPS["mini_qwen2_vl"] = MiniModelConfig( + liger_kernel_patch_func=apply_liger_kernel_to_qwen2_vl, + model_class=Qwen2VLForConditionalGeneration, + mini_model_config=Qwen2VLConfig( + attention_dropout=0.0, + bos_token_id=1, # 151643 + eos_token_id=2, # 151645 + hidden_act="silu", + hidden_size=1536, # 8192 + initializer_range=0.02, + intermediate_size=4864, # 29568 + max_position_embeddings=32768, + max_window_layers=4, # 80 + num_attention_heads=12, # 64 + num_hidden_layers=4, # 80 + num_key_value_heads=2, # 8 + rms_norm_eps=1e-6, # 1e-5 + rope_theta=1000000.0, + rope_scaling=dict( + type="mrope", + mrope_section=[16, 24, 24], # (temporal, height, width) + ), + sliding_window=4096, + tie_word_embeddings=False, + use_cache=True, + vocab_size=32000, # 152064 + use_sliding_window=False, + vision_config={ + "depth": 4, # 32 + "embed_dim": 1280, + "mlp_ratio": 4, + "num_heads": 16, + "in_chans": 3, + "hidden_size": 128, # 1536 + "patch_size": 14, + "spatial_merge_size": 2, + "spatial_patch_size": 14, + "temporal_patch_size": 2, + }, + attn_implementation="sdpa", + ), + ) + def create_model(model_name="mini_llama3"): """ From f21fe69c4b82b466427c6f493a8728f762a0e87d Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 21:29:39 -0700 Subject: [PATCH 08/19] poke tests --- test/convergence/test_mini_models.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index b9ce407c4..07d004220 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,13 +1,5 @@ import functools import os -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) import pytest import torch @@ -31,6 +23,14 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) try: # Qwen2-VL is only available in transformers>4.44.2 @@ -381,7 +381,6 @@ def run_mini_model( kwargs["geglu"] = True else: kwargs["swiglu"] = True - MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs) model = create_model(model_name).to(dtype).to("cuda") From 03bc0368e96bb5f8154c024f47058a32b1ac978f Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 21:53:19 -0700 Subject: [PATCH 09/19] Revert setup.py --- setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 5ba382c24..94aa4d10f 100644 --- a/setup.py +++ b/setup.py @@ -28,9 +28,9 @@ keywords="triton,kernels,LLM training,deep learning,Hugging Face,PyTorch,GPU optimization", 
include_package_data=True, install_requires=[ - "torch>=2.4.0", - "triton>=3.0.0", - "transformers @ git+https://github.com/huggingface/transformers.git", + "torch>=2.1.2", + "triton>=2.3.0", + "transformers>=4.42.0", ], extras_require={ "dev": [ From c4179ebac5810d7340b190216a2bdb8d2f9759ad Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 22:01:25 -0700 Subject: [PATCH 10/19] Add marks to pytest for lower transformers versions --- test/convergence/test_mini_models.py | 29 ++++++++++-- .../convergence/test_mini_models_no_logits.py | 46 +++++++++++++------ 2 files changed, 58 insertions(+), 17 deletions(-) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 07d004220..ee808ee9d 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -509,7 +509,22 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), - ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.float32, + 1e-8, + 1e-5, + 5e-3, + 1e-5, + 5e-3, + 1e-5, + marks=pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ), pytest.param( "mini_qwen2_vl", 32, @@ -521,9 +536,15 @@ def run_mini_model( 1e-5, 1e-2, 1e-5, - marks=pytest.mark.skipif( - not supports_bfloat16(), reason="bfloat16 not supported on this GPU" - ), + marks=[ + pytest.mark.skipif( + not supports_bfloat16(), reason="bfloat16 not supported on this GPU" + ), + pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ], ), ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 7c35653f3..5dc15e3f6 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -1,12 +1,3 @@ -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) - import pytest import torch from datasets import load_from_disk @@ -27,6 +18,14 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) try: # Qwen2-VL is only available in transformers>4.44.2 @@ -382,7 +381,22 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), - ("mini_qwen2_vl", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.float32, + 1e-8, + 1e-5, + 5e-3, + 1e-5, + 5e-3, + 1e-5, + marks=pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ), pytest.param( "mini_qwen2_vl", 32, @@ -394,9 +408,15 @@ def run_mini_model( 1e-5, 1e-2, 1e-5, - marks=pytest.mark.skipif( - not supports_bfloat16(), reason="bfloat16 not supported on this GPU" - ), + marks=[ + pytest.mark.skipif( + not supports_bfloat16(), reason="bfloat16 not supported on this GPU" + ), + pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ], ), ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( From 
1155d1babdcd2e51bf8f40e3085ff4eddbc15095 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 22:02:21 -0700 Subject: [PATCH 11/19] Checkstyle --- test/convergence/test_mini_models.py | 16 ++++++++-------- test/convergence/test_mini_models_no_logits.py | 17 +++++++++-------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index ee808ee9d..5da771f7b 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,5 +1,13 @@ import functools import os +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) import pytest import torch @@ -23,14 +31,6 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) try: # Qwen2-VL is only available in transformers>4.44.2 diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 5dc15e3f6..312f57788 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -1,3 +1,12 @@ +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) + import pytest import torch from datasets import load_from_disk @@ -18,14 +27,6 @@ apply_liger_kernel_to_qwen2, apply_liger_kernel_to_qwen2_vl, ) -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) try: # Qwen2-VL is only available in transformers>4.44.2 From 8c5182b620b4d3b7967fe2cf03fa8e90ad499131 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Mon, 2 Sep 2024 22:04:19 -0700 Subject: [PATCH 12/19] Revert comment --- test/convergence/test_mini_models_no_logits.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 312f57788..159502a6a 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -93,7 +93,11 @@ tie_word_embeddings=True, use_cache=True, vocab_size=32000, # 151936 - attn_implementation="sdpa", + # At rope backward + # Eager produces incontiguous dq and dk + # SDPA produces contiguous dq and incontiguous dk + # Flash_attn produces contiguous dq and dk + attn_implementation="sdpa", # default value, pytorch native attention ), ), "mini_phi3": MiniModelConfig( From 5125cf1db3e2db27103eae0e79b08696a5e49332 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Wed, 4 Sep 2024 21:42:49 -0700 Subject: [PATCH 13/19] Working multimodal convergence test --- setup.py | 6 +- test/convergence/test_mini_models.py | 119 +------ .../test_mini_models_multimodal.py | 314 ++++++++++++++++++ test/utils.py | 28 ++ 4 files changed, 354 insertions(+), 113 deletions(-) create mode 100644 test/convergence/test_mini_models_multimodal.py diff --git a/setup.py b/setup.py index 94aa4d10f..5ba382c24 100644 --- a/setup.py +++ b/setup.py @@ -28,9 +28,9 @@ keywords="triton,kernels,LLM training,deep learning,Hugging Face,PyTorch,GPU optimization", include_package_data=True, install_requires=[ - "torch>=2.1.2", - "triton>=2.3.0", - "transformers>=4.42.0", + "torch>=2.4.0", + 
"triton>=3.0.0", + "transformers @ git+https://github.com/huggingface/transformers.git", ], extras_require={ "dev": [ diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index 5da771f7b..ba3cb8c95 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,13 +1,5 @@ import functools import os -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) import pytest import torch @@ -29,19 +21,15 @@ apply_liger_kernel_to_mixtral, apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, - apply_liger_kernel_to_qwen2_vl, ) - -try: - # Qwen2-VL is only available in transformers>4.44.2 - from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig - from transformers.models.qwen2_vl.modeling_qwen2_vl import ( - Qwen2VLForConditionalGeneration, - ) - - QWEN2_VL_AVAILABLE = True -except ImportError: - QWEN2_VL_AVAILABLE = False +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) torch.use_deterministic_algorithms(True) @@ -293,52 +281,6 @@ ), } -if QWEN2_VL_AVAILABLE: - MINI_MODEL_SETUPS["mini_qwen2_vl"] = MiniModelConfig( - liger_kernel_patch_func=functools.partial( - apply_liger_kernel_to_qwen2_vl, fused_linear_cross_entropy=False - ), - model_class=Qwen2VLForConditionalGeneration, - mini_model_config=Qwen2VLConfig( - attention_dropout=0.0, - bos_token_id=1, # 151643 - eos_token_id=2, # 151645 - hidden_act="silu", - hidden_size=1536, # 8192 - initializer_range=0.02, - intermediate_size=4864, # 29568 - max_position_embeddings=32768, - max_window_layers=4, # 80 - num_attention_heads=12, # 64 - num_hidden_layers=4, # 80 - num_key_value_heads=2, # 8 - rms_norm_eps=1e-6, # 1e-5 - rope_theta=1000000.0, - rope_scaling=dict( - type="mrope", - mrope_section=[16, 24, 24], # (temporal, height, width) - ), - sliding_window=4096, - tie_word_embeddings=False, - use_cache=True, - vocab_size=32000, # 152064 - use_sliding_window=False, - vision_config={ - "depth": 4, # 32 - "embed_dim": 1280, - "mlp_ratio": 4, - "num_heads": 16, - "in_chans": 3, - "hidden_size": 128, # 1536 - "patch_size": 14, - "spatial_merge_size": 2, - "spatial_patch_size": 14, - "temporal_patch_size": 2, - }, - attn_implementation="sdpa", - ), - ) - def create_model(model_name="mini_llama3"): """ @@ -368,14 +310,8 @@ def run_mini_model( kwargs = { "rms_norm": True, "cross_entropy": True, + "rope": True, } - model_supports_rope = "qwen2_vl" not in model_name - if model_supports_rope: - kwargs["rope"] = True - - model_supports_layer_norm = "qwen2_vl" in model_name - if model_supports_layer_norm: - kwargs["layer_norm"] = True if "gemma" in model_name: kwargs["geglu"] = True @@ -509,43 +445,6 @@ def run_mini_model( not supports_bfloat16(), reason="bfloat16 not supported on this GPU" ), ), - pytest.param( - "mini_qwen2_vl", - 32, - 1e-4, - torch.float32, - 1e-8, - 1e-5, - 5e-3, - 1e-5, - 5e-3, - 1e-5, - marks=pytest.mark.skipif( - not QWEN2_VL_AVAILABLE, - reason="Qwen2-VL not available in this version of transformers", - ), - ), - pytest.param( - "mini_qwen2_vl", - 32, - 1e-4, - torch.bfloat16, - 1e-8, - 1e-5, - 1e-2, - 1e-5, - 1e-2, - 1e-5, - marks=[ - pytest.mark.skipif( - not supports_bfloat16(), reason="bfloat16 not supported on this GPU" - ), - pytest.mark.skipif( - not QWEN2_VL_AVAILABLE, - reason="Qwen2-VL not available in this version of transformers", - 
), - ], - ), ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5), pytest.param( "mini_phi3", diff --git a/test/convergence/test_mini_models_multimodal.py b/test/convergence/test_mini_models_multimodal.py new file mode 100644 index 000000000..4e03348dd --- /dev/null +++ b/test/convergence/test_mini_models_multimodal.py @@ -0,0 +1,314 @@ +import functools +import os + +import pytest +import torch +from datasets import load_dataset +from torch.utils.data import DataLoader +from transformers.models.auto.processing_auto import AutoProcessor + +from liger_kernel.transformers import ( + apply_liger_kernel_to_qwen2_vl, +) +from test.utils import ( + UNTOKENIZED_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + multimodal_collate_fn, + set_seed, + supports_bfloat16, +) + +try: + # Qwen2-VL is only available in transformers>4.44.2 + from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, + ) + + QWEN2_VL_AVAILABLE = True +except ImportError: + QWEN2_VL_AVAILABLE = False + +torch.use_deterministic_algorithms(True) + +# Only setting torch.use_deterministic_algorithms(True) throws the following error: +# RuntimeError: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`, +# but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an +# environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information, +# go to https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility + +os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" + +TEST_IMAGE_DIM = 64 + +MINI_MODEL_SETUPS = {} + +if QWEN2_VL_AVAILABLE: + MINI_MODEL_SETUPS["mini_qwen2_vl"] = MiniModelConfig( + liger_kernel_patch_func=functools.partial( + apply_liger_kernel_to_qwen2_vl, fused_linear_cross_entropy=False + ), + model_class=Qwen2VLForConditionalGeneration, + mini_model_config=Qwen2VLConfig( + attention_dropout=0.0, + # Token Ids and vocab size must match those in the tokenizer/processor + # https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/config.json + bos_token_id=151643, + eos_token_id=151645, + vision_start_token_id=151652, + vision_end_token_id=151653, + vision_token_id=151654, + image_token_id=151655, + hidden_act="silu", + hidden_size=1024, # 8192 + initializer_range=0.02, + intermediate_size=1024, # 29568 + max_position_embeddings=32768, + max_window_layers=4, # 80 + num_attention_heads=8, # 64 + num_hidden_layers=4, # 80 + num_key_value_heads=2, # 8 + rms_norm_eps=1e-6, # 1e-5 + rope_theta=1000000.0, + rope_scaling=dict( + type="mrope", + mrope_section=[16, 24, 24], # (temporal, height, width) + ), + sliding_window=4096, + tie_word_embeddings=True, + use_cache=False, # True + vocab_size=152064, + use_sliding_window=False, + vision_config={ + "depth": 4, # 32 + "embed_dim": 128, # 1280 + "mlp_ratio": 1, + "num_heads": 8, # 16 + "in_chans": 3, + "hidden_size": 1024, # 1536 + }, + attn_implementation="sdpa", + ), + ) + + +def create_processor(model_name): + if model_name == "mini_qwen2_vl": + return AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") + else: + raise ValueError(f"Processor not available for model {model_name}") + + +def create_multimodal_dataset(model_name: str): + processor = 
create_processor(model_name) + + def generate_procedural_image(example, index): + """Generate an image with a single row of white pixels at the index specified""" + image = torch.zeros(3, TEST_IMAGE_DIM, TEST_IMAGE_DIM) + row_index = index % TEST_IMAGE_DIM + image[:, row_index, :] = 255 + example["image"] = image + return example + + def apply_chat_template(example): + """ + Under the hood, this inserts the correct image placeholder token into the text. + More or less this conversation format is used by HF's mllms. + """ + conversation = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": "Describe this image."}, + ], + }, + { + "role": "assistant", + "content": [{"type": "text", "text": example["text"]}], + }, + ] + example["text"] = processor.apply_chat_template(conversation, tokenize=False) + return example + + def preprocess_function(examples): + return processor( + text=examples["text"], + images=examples["image"], + padding="max_length", + truncation=True, + max_length=1024, # longer than for text-only b/c images require quite a few tokens + ) + + train_dataset = ( + load_dataset( + "text", data_files={"train": UNTOKENIZED_DATASET_PATH}, split="train" + ) + .to_iterable_dataset() + .map(generate_procedural_image, with_indices=True) + .map(apply_chat_template) + .map(preprocess_function, remove_columns=["text", "image"]) + ) + return train_dataset + + +def create_model(model_name): + """ + Create a mini version model + The commented values are the original values + """ + model_config = MINI_MODEL_SETUPS[model_name].mini_model_config + model_class = MINI_MODEL_SETUPS[model_name].model_class + return model_class(model_config) + + +def run_mini_model_multimodal( + model_name="mini_qwen2_vl", + num_steps=100, + dtype=torch.bfloat16, + lr=1e-5, + with_liger=False, +): + # If we move it to the beginning of test_mini_model, the two runs are initialized with different weights. + # This is due to RNG (Random Number Generator). The formula of RNG progression is x_(n+1) = (a * x_n + c) % m + # Every time RNG is used, like randomly initializing weights, the RNG progresses to the next state. + # Therefore, we have to reset RNG before we create the model to ensure the weight initialization started from the same RNG state.
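(An editorial aside, not part of the patch:) a minimal sketch of the RNG behavior described in the comment above — any draw from torch's global generator between the seed reset and model construction shifts the state, and therefore the resulting initialization:

```python
import torch


def fresh_weights() -> torch.Tensor:
    # Constructing a module consumes global RNG state to initialize its weights.
    return torch.nn.Linear(4, 4).weight.detach().clone()


torch.manual_seed(42)
w1 = fresh_weights()

torch.manual_seed(42)
torch.rand(1)  # any intervening draw advances the generator state...
w2 = fresh_weights()  # ...so this init starts from a different state

torch.manual_seed(42)
w3 = fresh_weights()  # same state as w1's init

print(torch.equal(w1, w2))  # False
print(torch.equal(w1, w3))  # True
```

This is why `set_seed(42)` is called immediately before `create_model` in both the baseline and Liger runs.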
+ + set_seed(42) + + if with_liger is True: + kwargs = { + "rms_norm": True, + "cross_entropy": True, + } + model_supports_rope = "qwen2_vl" not in model_name + if model_supports_rope: + kwargs["rope"] = True + + model_supports_layer_norm = "qwen2_vl" in model_name + if model_supports_layer_norm: + kwargs["layer_norm"] = True + + if "gemma" in model_name: + kwargs["geglu"] = True + else: + kwargs["swiglu"] = True + MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs) + + model = create_model(model_name).to(dtype).to("cuda") + model.gradient_checkpointing_enable() + + train_dataset = create_multimodal_dataset(model_name) + loader = DataLoader( + train_dataset, batch_size=2, shuffle=False, collate_fn=multimodal_collate_fn + ) + loader_iter = iter(loader) + optimizer = torch.optim.AdamW(model.parameters(), lr=lr) + + loss_list = [] + + for i in range(num_steps): + batch = next(loader_iter).to(model.device) + optimizer.zero_grad() + output = model(**batch) + output.loss.backward() + optimizer.step() + + # Disable gradient checkpointing after the step + model.gradient_checkpointing_disable() + + print(f"Step {i}, Loss: {output.loss.item()}") + loss_list.append(output.loss.item()) + + return {"loss": loss_list, "logits": output.logits, "model": model} + + +@pytest.mark.parametrize( + "model_name, num_steps, lr, dtype, loss_atol, loss_rtol, logits_atol, logits_rtol, param_atol, param_rtol", + [ + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.float32, + 1e-8, + 1e-5, + 5e-3, + 1e-5, + 5e-3, + 1e-5, + marks=pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ), + pytest.param( + "mini_qwen2_vl", + 32, + 1e-4, + torch.bfloat16, + 1e-8, + 1e-5, + 1e-2, + 1e-5, + 1e-2, + 1e-5, + marks=[ + pytest.mark.skipif( + not supports_bfloat16(), reason="bfloat16 not supported on this GPU" + ), + pytest.mark.skipif( + not QWEN2_VL_AVAILABLE, + reason="Qwen2-VL not available in this version of transformers", + ), + ], + ), + ], +) +def test_mini_model_multimodal( + model_name, + num_steps, + lr, + dtype, + loss_atol, + loss_rtol, + logits_atol, + logits_rtol, + param_atol, + param_rtol, +): + # Non-liger models should be initialized and tested first to avoid the module being overridden + expected_output = run_mini_model_multimodal( + model_name=model_name, num_steps=num_steps, dtype=dtype, lr=lr + ) + + actual_output = run_mini_model_multimodal( + model_name=model_name, num_steps=num_steps, dtype=dtype, lr=lr, with_liger=True + ) + + # Compare the loss of every step + assert_verbose_allclose( + torch.tensor([expected_output["loss"]]), + torch.tensor([actual_output["loss"]]), + atol=loss_atol, + rtol=loss_rtol, + ) + + # Compare the logits from the last step + assert_verbose_allclose( + expected_output["logits"], + actual_output["logits"], + atol=logits_atol, + rtol=logits_rtol, + ) + + # Compare the params from the last step + # Iterate over the model's parameters and compare them + for expected_param, actual_param in zip( + expected_output["model"].named_parameters(), + actual_output["model"].named_parameters(), + ): + assert_verbose_allclose( + expected_param[1], actual_param[1], atol=param_atol, rtol=param_rtol + ) diff --git a/test/utils.py b/test/utils.py index cb66742e2..a6168d03b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -88,6 +88,10 @@ def assert_verbose_allclose(tensor1, tensor2, rtol=1e-05, atol=1e-08, max_print= os.path.dirname(os.path.abspath(__file__)), "resources/tiny_shakespeare_tokenized" ) 
+UNTOKENIZED_DATASET_PATH = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "resources/tiny_shakespeare.txt" +) + @dataclass class MiniModelConfig: @@ -114,6 +118,30 @@ def simple_collate_fn(data: List[Dict[str, Any]]): ) +def multimodal_collate_fn(data: List[Dict[str, Any]]): + """A collate function to use for DataLoader for multimodal models""" + keys = set(data[0].keys()) + keys.remove("input_ids") + + input_ids = torch.stack([torch.tensor(item["input_ids"]) for item in data]).squeeze( + 1 + ) + labels = input_ids.clone() + + batch = {} + # Collate all other keys, e.g. pixel_values, attention_mask, image_grid_thw, etc + for key in keys: + # TODO: find way to not require squeeze(1) for all keys. Its + # currently required b/c the data is being passed in with an extra + # unexpected dimension + batch[key] = torch.stack([item[key] for item in data]).squeeze(1) + + batch["input_ids"] = input_ids + batch["labels"] = labels + + return BatchEncoding(batch) + + def supports_bfloat16(): if not torch.cuda.is_available(): return False From 904704b5660aade2feb5e28344a62dff27b1370e Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Wed, 4 Sep 2024 21:45:27 -0700 Subject: [PATCH 14/19] Checkstyle --- test/convergence/test_mini_models.py | 16 +++++++-------- .../test_mini_models_multimodal.py | 20 +++++++++---------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index ba3cb8c95..c0e8d9df4 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -1,5 +1,13 @@ import functools import os +from test.utils import ( + DEFAULT_DATASET_PATH, + MiniModelConfig, + assert_verbose_allclose, + set_seed, + simple_collate_fn, + supports_bfloat16, +) import pytest import torch @@ -22,14 +30,6 @@ apply_liger_kernel_to_phi3, apply_liger_kernel_to_qwen2, ) -from test.utils import ( - DEFAULT_DATASET_PATH, - MiniModelConfig, - assert_verbose_allclose, - set_seed, - simple_collate_fn, - supports_bfloat16, -) torch.use_deterministic_algorithms(True) diff --git a/test/convergence/test_mini_models_multimodal.py b/test/convergence/test_mini_models_multimodal.py index 4e03348dd..5c2ae5202 100644 --- a/test/convergence/test_mini_models_multimodal.py +++ b/test/convergence/test_mini_models_multimodal.py @@ -1,15 +1,5 @@ import functools import os - -import pytest -import torch -from datasets import load_dataset -from torch.utils.data import DataLoader -from transformers.models.auto.processing_auto import AutoProcessor - -from liger_kernel.transformers import ( - apply_liger_kernel_to_qwen2_vl, -) from test.utils import ( UNTOKENIZED_DATASET_PATH, MiniModelConfig, @@ -19,6 +9,14 @@ supports_bfloat16, ) +import pytest +import torch +from datasets import load_dataset +from torch.utils.data import DataLoader +from transformers.models.auto.processing_auto import AutoProcessor + +from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl + try: # Qwen2-VL is only available in transformers>4.44.2 from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig @@ -201,7 +199,7 @@ def run_mini_model_multimodal( train_dataset = create_multimodal_dataset(model_name) loader = DataLoader( - train_dataset, batch_size=2, shuffle=False, collate_fn=multimodal_collate_fn + train_dataset, batch_size=4, shuffle=False, collate_fn=multimodal_collate_fn ) loader_iter = iter(loader) optimizer = torch.optim.AdamW(model.parameters(), lr=lr) From c9583d2fc4d30267e5be6a40a1b3bd270e04d8eb Mon 
Sep 17 00:00:00 2001 From: Tyler Romero Date: Wed, 4 Sep 2024 22:11:58 -0700 Subject: [PATCH 15/19] cleanup --- test/convergence/test_mini_models.py | 4 ++-- test/convergence/test_mini_models_multimodal.py | 14 ++++++-------- test/convergence/test_mini_models_no_logits.py | 1 + 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/test/convergence/test_mini_models.py b/test/convergence/test_mini_models.py index c0e8d9df4..a2b6f59ef 100644 --- a/test/convergence/test_mini_models.py +++ b/test/convergence/test_mini_models.py @@ -308,11 +308,10 @@ def run_mini_model( if with_liger is True: kwargs = { + "rope": True, "rms_norm": True, "cross_entropy": True, - "rope": True, } - if "gemma" in model_name: kwargs["geglu"] = True else: @@ -332,6 +331,7 @@ def run_mini_model( for i in range(num_steps): batch = next(loader_iter).to(model.device) + optimizer.zero_grad() output = model(**batch) output.loss.backward() optimizer.step() diff --git a/test/convergence/test_mini_models_multimodal.py b/test/convergence/test_mini_models_multimodal.py index 5c2ae5202..9d77d43e6 100644 --- a/test/convergence/test_mini_models_multimodal.py +++ b/test/convergence/test_mini_models_multimodal.py @@ -104,15 +104,15 @@ def create_multimodal_dataset(model_name: str): def generate_procedural_image(example, index): """Generate an image with a single row of white pixels at the index specified""" image = torch.zeros(3, TEST_IMAGE_DIM, TEST_IMAGE_DIM) - row_index = index % TEST_IMAGE_DIM - image[:, row_index, :] = 255 + image[:, index % TEST_IMAGE_DIM, :] = 255 example["image"] = image return example def apply_chat_template(example): """ Under the hood, this inserts the correct image placeholder token into the text. - More or less this conversation format is used by HF's mllms. + More or less this conversation format is used by HF's mllms. The fact that it is + formatted as if for IFT is not in-and-of-itself important here.
""" conversation = [ { @@ -131,6 +131,7 @@ def apply_chat_template(example): return example def preprocess_function(examples): + """Tokenize text, preprocess images, and generate other relevant inputs for the model.""" return processor( text=examples["text"], images=examples["image"], @@ -143,7 +144,7 @@ def preprocess_function(examples): load_dataset( "text", data_files={"train": UNTOKENIZED_DATASET_PATH}, split="train" ) - .to_iterable_dataset() + .to_iterable_dataset() # only map examples as-needed and on-demand .map(generate_procedural_image, with_indices=True) .map(apply_chat_template) .map(preprocess_function, remove_columns=["text", "image"]) @@ -199,7 +200,7 @@ def run_mini_model_multimodal( train_dataset = create_multimodal_dataset(model_name) loader = DataLoader( - train_dataset, batch_size=4, shuffle=False, collate_fn=multimodal_collate_fn + train_dataset, batch_size=2, shuffle=False, collate_fn=multimodal_collate_fn ) loader_iter = iter(loader) optimizer = torch.optim.AdamW(model.parameters(), lr=lr) @@ -213,9 +214,6 @@ def run_mini_model_multimodal( output.loss.backward() optimizer.step() - # Disable gradient checkpointing after the step - model.gradient_checkpointing_disable() - print(f"Step {i}, Loss: {output.loss.item()}") loss_list.append(output.loss.item()) diff --git a/test/convergence/test_mini_models_no_logits.py b/test/convergence/test_mini_models_no_logits.py index 159502a6a..a1b409d0a 100644 --- a/test/convergence/test_mini_models_no_logits.py +++ b/test/convergence/test_mini_models_no_logits.py @@ -342,6 +342,7 @@ def run_mini_model( for i in range(num_steps): batch = next(loader_iter).to(model.device) + optimizer.zero_grad() output = model(**batch) output.loss.backward() optimizer.step() From 175abe8bf550fe2063b69e03b32dcde1736eaf1d Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Wed, 4 Sep 2024 22:36:12 -0700 Subject: [PATCH 16/19] Add uv.lock to gitignore --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 8b4455fc2..32e55a36f 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,7 @@ site/ # Build build/ -dist/ \ No newline at end of file +dist/ + +# Lockfiles +uv.lock \ No newline at end of file From f9422a9b5c07ba71ed586ba9f69816c69429475e Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 5 Sep 2024 09:00:11 -0700 Subject: [PATCH 17/19] Clean up multimodal collation --- test/utils.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/test/utils.py b/test/utils.py index a6168d03b..5ff28cdac 100644 --- a/test/utils.py +++ b/test/utils.py @@ -120,24 +120,19 @@ def simple_collate_fn(data: List[Dict[str, Any]]): def multimodal_collate_fn(data: List[Dict[str, Any]]): """A collate function to use for DataLoader for multimodal models""" + batch = {} keys = set(data[0].keys()) + + input_ids = torch.cat([torch.tensor(item["input_ids"]) for item in data]) keys.remove("input_ids") + batch["input_ids"] = input_ids - input_ids = torch.stack([torch.tensor(item["input_ids"]) for item in data]).squeeze( - 1 - ) labels = input_ids.clone() + batch["labels"] = labels - batch = {} # Collate all other keys, e.g. pixel_values, attention_mask, image_grid_thw, etc for key in keys: - # TODO: find way to not require squeeze(1) for all keys. 
Its - # currently required b/c the data is being passed in with an extra - # unexpected dimension - batch[key] = torch.stack([item[key] for item in data]).squeeze(1) - - batch["input_ids"] = input_ids - batch["labels"] = labels + batch[key] = torch.cat([item[key] for item in data]) return BatchEncoding(batch) From 3bd22bcf6da919a5e602104365c3d3553a5d3666 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 5 Sep 2024 17:44:31 -0700 Subject: [PATCH 18/19] Update readme --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5e092f0cc..4d937abe2 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ With one line of code, Liger Kernel can increase throughput by more than 20% and - **Exact:** Computation is exact—no approximations! Both forward and backward passes are implemented with rigorous unit tests and undergo convergence testing against training runs without Liger Kernel to ensure accuracy. - **Lightweight:** Liger Kernel has minimal dependencies, requiring only Torch and Triton—no extra libraries needed! Say goodbye to dependency headaches! - **Multi-GPU supported:** Compatible with multi-GPU setups (PyTorch FSDP, DeepSpeed, DDP, etc.). -- **Trainer Framework Integration**: [Axolotl](https://github.com/axolotl-ai-cloud/axolotl), [LLaMa-Factory](https://github.com/hiyouga/LLaMA-Factory), [SFTTrainer](https://github.com/huggingface/trl/releases/tag/v0.10.1), [Hugging Face Trainer](https://github.com/huggingface/transformers/pull/32860) +- **Trainer Framework Integration**: [Axolotl](https://github.com/axolotl-ai-cloud/axolotl), [LLaMa-Factory](https://github.com/hiyouga/LLaMA-Factory), [SFTTrainer](https://github.com/huggingface/trl/releases/tag/v0.10.1), [Hugging Face Trainer](https://github.com/huggingface/transformers/pull/32860) ## Target Audiences @@ -226,6 +226,7 @@ loss.backward() | Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy | | Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss | | Qwen2 | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy | +| Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy | | Phi3 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy | From f590993a865df32a221d6617a01a51dfff51bc47 Mon Sep 17 00:00:00 2001 From: Tyler Romero Date: Thu, 5 Sep 2024 17:46:32 -0700 Subject: [PATCH 19/19] correction --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4d937abe2..2a7c42089 100644 --- a/README.md +++ b/README.md @@ -226,7 +226,7 @@ loss.backward() | Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy | | Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss | | Qwen2 | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy | -| Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy | +| Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, 
FusedLinearCrossEntropy | | Phi3 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
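For reference, a minimal usage sketch of the monkeypatch this series adds (an editorial aside, not part of the patch itself; the kwargs shown are the ones exercised by the convergence tests, and the actual defaults may differ):

```python
import torch
from transformers import Qwen2VLForConditionalGeneration

from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl

# Patch Qwen2-VL's modules in place *before* instantiating the model so the
# Liger kernels are picked up during construction.
apply_liger_kernel_to_qwen2_vl(
    rms_norm=True,
    layer_norm=True,
    swiglu=True,
    cross_entropy=False,
    fused_linear_cross_entropy=True,  # fuses the lm_head projection with the loss
)

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="cuda",
)
```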