From 56b758cd9563ae03fadf0699fe1894d445539d6d Mon Sep 17 00:00:00 2001
From: lugimzzz
Date: Wed, 28 Aug 2024 20:59:04 +0800
Subject: [PATCH 1/3] add sp hook

---
 llm/alignment/dpo/run_dpo.py | 6 +++++-
 llm/run_finetune.py          | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/llm/alignment/dpo/run_dpo.py b/llm/alignment/dpo/run_dpo.py
index 3945375aee43..17d8c7b8efa2 100644
--- a/llm/alignment/dpo/run_dpo.py
+++ b/llm/alignment/dpo/run_dpo.py
@@ -35,6 +35,7 @@
     AutoTokenizer,
     LlamaForCausalLM,
     LlamaForCausalLMPipe,
+    register_sequence_parallel_allreduce_hooks,
 )
 from paddlenlp.trl import (
     DPOTrainer,
@@ -138,7 +139,10 @@ def main():
 
     if model_args.flash_mask and not any(isinstance(model, cls) for cls in flash_mask_support_list):
         raise NotImplementedError(f"{model.__class__} not support flash mask.")
-
+    if training_args.sequence_parallel:
+        register_sequence_parallel_allreduce_hooks(
+            model, training_args.gradient_accumulation_steps, training_args.fuse_sequence_parallel_allreduce
+        )
     if model_args.tokenizer_name_or_path is not None:
         tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path)
     else:
diff --git a/llm/run_finetune.py b/llm/run_finetune.py
index 576191f79755..41208e2427cf 100644
--- a/llm/run_finetune.py
+++ b/llm/run_finetune.py
@@ -52,6 +52,7 @@
     LlamaForCausalLM,
     LlamaForCausalLMPipe,
     LlamaTokenizer,
+    register_sequence_parallel_allreduce_hooks,
 )
 from paddlenlp.transformers.configuration_utils import LlmMetaConfig
 from paddlenlp.utils.llm_utils import (
@@ -197,7 +198,10 @@ def neft_post_hook(module, input, output):
             neft_post_hook_handle = model.get_input_embeddings().register_forward_post_hook(neft_post_hook)
         else:
             raise NotImplementedError("Only support neftune for model with get_input_embeddings")
-
+    if training_args.sequence_parallel:
+        register_sequence_parallel_allreduce_hooks(
+            model, training_args.gradient_accumulation_steps, training_args.fuse_sequence_parallel_allreduce
+        )
     # Load tokenizer & dataset
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, from_aistudio=model_args.from_aistudio)
     # init chat_template for tokenizer
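
Background for the hook registration in patch 1/3: under Megatron-style sequence parallelism the activations are sharded along the sequence dimension, while small parameters such as LayerNorm weights stay replicated on every tensor/model-parallel rank, so each rank computes only a partial gradient for them and the partials have to be summed across that group. The sketch below is only a rough illustration of that idea; it is NOT the PaddleNLP implementation of register_sequence_parallel_allreduce_hooks (which, as the call sites above show, also takes gradient_accumulation_steps and a fuse_sequence_parallel_allreduce switch). The helper name register_allreduce_hooks_sketch, the parameter selection, and the assumption that the job was launched with an initialized fleet hybrid-parallel setup are all illustrative.

# Rough sketch, assuming an already-initialized hybrid-parallel run under
# paddle.distributed.fleet; not the real register_sequence_parallel_allreduce_hooks.
import paddle.distributed as dist
from paddle.distributed import fleet


def register_allreduce_hooks_sketch(model, mp_group=None):
    """Sum gradients of replicated parameters over the model-parallel group."""
    if mp_group is None:
        # Assumes fleet.init(...) was called with a hybrid (tensor-parallel) config.
        mp_group = fleet.get_hybrid_communicate_group().get_model_parallel_group()

    def hook(grad):
        # Each rank only sees the gradient contribution of its sequence shard,
        # so the replicated parameter's gradient must be summed across ranks.
        dist.all_reduce(grad, group=mp_group)
        return grad

    for param in model.parameters():
        # The real helper is more selective (it skips partitioned parameters and
        # can defer/fuse the all-reduce across accumulation steps); here every
        # trainable parameter simply gets the gradient hook.
        if not param.stop_gradient:
            param.register_hook(hook)

Because all-reduce is linear, firing the hook on every micro-batch still produces the correct summed gradient under gradient accumulation; passing gradient_accumulation_steps to the real helper is presumably an efficiency choice, not a correctness one.
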
From b2dfb7e90b88ae4541f809baeea10ea83df6f1df Mon Sep 17 00:00:00 2001
From: lugimzzz
Date: Wed, 28 Aug 2024 21:00:08 +0800
Subject: [PATCH 2/3] support zero padding pad to max_length

---
 llm/run_finetune.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llm/run_finetune.py b/llm/run_finetune.py
index 7f7d59369880..0795d149181b 100644
--- a/llm/run_finetune.py
+++ b/llm/run_finetune.py
@@ -526,6 +526,7 @@ def compute_metrics_do_generation(eval_preds):
         training_args.pipeline_parallel_degree > 1
         or training_args.sequence_parallel
         or training_args.autotuner_benchmark
+        or data_args.zero_padding
     ):
         # NOTE(gongenlei): new add autotuner_benchmark
         max_length = data_args.max_length

From 83a0b2505753df31c92e8424cb85f2e2bbf1860f Mon Sep 17 00:00:00 2001
From: lugimzzz
Date: Wed, 28 Aug 2024 21:01:50 +0800
Subject: [PATCH 3/3] support finetune pad to max_length

---
 llm/run_finetune.py   | 1 +
 llm/utils/argument.py | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/llm/run_finetune.py b/llm/run_finetune.py
index 0795d149181b..67890ea94761 100644
--- a/llm/run_finetune.py
+++ b/llm/run_finetune.py
@@ -527,6 +527,7 @@ def compute_metrics_do_generation(eval_preds):
         or training_args.sequence_parallel
         or training_args.autotuner_benchmark
         or data_args.zero_padding
+        or data_args.pad_to_max_length
     ):
         # NOTE(gongenlei): new add autotuner_benchmark
         max_length = data_args.max_length
diff --git a/llm/utils/argument.py b/llm/utils/argument.py
index 5c4df81ff05d..52f6ebb14594 100644
--- a/llm/utils/argument.py
+++ b/llm/utils/argument.py
@@ -132,6 +132,10 @@ class DataArgument:
             "help": "@deprecated Please use `zero_padding`. Whether to use InTokens data stream, same as `zero_padding`."
         },
     )  # Alias for zero_padding
+    pad_to_max_length: bool = field(
+        default=False,
+        metadata={"help": "Pad the input sequence to `max_length`."},
+    )
 
     def __post_init__(self):
         if self.task_name_or_path is not None:
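
Patches 2/3 and 3/3 only change which target length the fine-tuning script pads to: once zero_padding or the new pad_to_max_length flag is set (or pipeline/sequence parallelism or the autotuner benchmark is active), max_length is taken from data_args.max_length so every batch has the same fixed shape; otherwise the target presumably stays dynamic, since the unchanged branch is not shown in the diff. The snippet below is a self-contained illustration of that difference; pad_batch and its arguments are hypothetical and not part of the PaddleNLP code base.

# Illustration only: fixed-length padding (what pad_to_max_length/zero_padding
# select in the patched script) versus padding to the longest sequence in the batch.
from typing import List, Optional


def pad_batch(sequences: List[List[int]], pad_id: int = 0, max_length: Optional[int] = None) -> List[List[int]]:
    """Pad token-id sequences to max_length if given, else to the batch maximum."""
    target = max_length if max_length is not None else max(len(s) for s in sequences)
    return [s + [pad_id] * (target - len(s)) for s in sequences]


batch = [[11, 12, 13], [21, 22], [31]]
print(pad_batch(batch))                # dynamic: every row padded to length 3
print(pad_batch(batch, max_length=8))  # fixed shape, as with pad_to_max_length

Fixed shapes are what make the flag useful alongside zero padding and sequence parallelism, where every rank and every step should see identically shaped inputs.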