[Distributed] enable tensor_parallel_output for finetuning
SylarTiaNII committed May 7, 2024
1 parent fdcabf8 commit 88b1da4
Showing 1 changed file with 3 additions and 3 deletions.
llm/finetune_generation.py: 3 additions & 3 deletions
@@ -140,7 +140,7 @@ def main():
     if not training_args.autotuner_benchmark:
         model = AutoModelForCausalLMPipe.from_pretrained(
             model_args.model_name_or_path,
-            tensor_parallel_output=False,
+            tensor_parallel_output=True,
             tensor_parallel_degree=training_args.tensor_parallel_degree,
             tensor_parallel_rank=training_args.tensor_parallel_rank,
             use_flash_attention=model_args.use_flash_attention,
@@ -152,7 +152,7 @@ def main():
     # NOTE(gongenlei): new add autotuner_benchmark
     model_config = AutoConfig.from_pretrained(
         model_args.model_name_or_path,
-        tensor_parallel_output=False,
+        tensor_parallel_output=True,
         tensor_parallel_degree=training_args.tensor_parallel_degree,
         tensor_parallel_rank=training_args.tensor_parallel_rank,
         dtype=dtype,
@@ -163,7 +163,7 @@
     else:
         model_config = AutoConfig.from_pretrained(
             model_args.model_name_or_path,
-            tensor_parallel_output=False,
+            tensor_parallel_output=True,
             tensor_parallel_degree=training_args.tensor_parallel_degree,
             tensor_parallel_rank=training_args.tensor_parallel_rank,
             dtype=dtype,
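For context, a minimal, self-contained sketch (not code from the repository) of what the tensor_parallel_output flag typically controls, assuming the usual column-parallel LM-head layout in tensor-parallel training: each rank computes a vocabulary shard of the logits, and the flag decides whether those shards are gathered into full logits on every rank (False) or kept sharded so the loss is computed with a vocab-parallel cross entropy (True). All names below (tp_degree, rank_weights, shard_logits) are illustrative and do not appear in the commit.

import numpy as np

np.random.seed(0)
batch, seq, hidden_size, vocab, tp_degree = 2, 4, 16, 8, 2
hidden = np.random.randn(batch, seq, hidden_size)

# Column-parallel LM head: each tensor-parallel rank owns vocab // tp_degree output columns.
rank_weights = [np.random.randn(hidden_size, vocab // tp_degree) for _ in range(tp_degree)]
shard_logits = [hidden @ w for w in rank_weights]    # per rank: (batch, seq, vocab // tp_degree)

# tensor_parallel_output=False: gather the shards so every rank sees the full logits.
full_logits = np.concatenate(shard_logits, axis=-1)  # (batch, seq, vocab)

# tensor_parallel_output=True: skip the gather; each rank keeps only its shard and
# the loss is computed with a vocab-parallel cross entropy over the sharded logits.
print(shard_logits[0].shape, full_logits.shape)      # (2, 4, 4) (2, 4, 8)

Keeping the logits sharded avoids materializing and gathering a (batch, seq, vocab) tensor on every step, which is presumably the motivation for flipping the flag to True for finetuning.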
