[LLM] disable part of MC2 in lora
SylarTiaNII committed May 29, 2024
1 parent c1cfe63 · commit 24ac1d2
Showing 1 changed file with 4 additions and 2 deletions.
paddlenlp/peft/lora/lora_layers.py: 6 changes (4 additions, 2 deletions)
@@ -399,7 +399,8 @@ def forward(self, x: paddle.Tensor):
         else:
             input_mp = x

-        if MC2RowSeqParallelCoreLinear is None:
+        # TODO(@gexiao): temporary workaround for deterministic calculation
+        if True or MC2RowSeqParallelCoreLinear is None:
             output_parallel = self.linear(input_mp, self.weight, name=self._name)
             output_ = ReduceScatterOp.apply(output_parallel)
             result_mp = output_ + self.bias if self.bias is not None else output_
@@ -651,7 +652,8 @@ def forward(self, x: paddle.Tensor):

         if not self.merged:
             input_a = self.lora_dropout(x) @ self.lora_A
-            if MC2ColumnSeqParallelCoreLinear is None:
+            # TODO(@gexiao): temporary workaround for deterministic calculation
+            if True or MC2ColumnSeqParallelCoreLinear is None:
                 input_a = AllGatherOp.apply(input_a)
                 delta_mp = (input_a @ self.lora_B) * self.scaling
             else:
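For context, a minimal standalone sketch of the pattern this commit applies (not the PaddleNLP implementation): prefixing the condition with `True or` short-circuits it, so the non-fused fallback (plain matmul followed by the reduce-scatter / all-gather communication op) always runs, even when the MC2 fused kernels were imported successfully. All names in the sketch are hypothetical stand-ins.

# Sketch of the "True or ..." workaround (hypothetical names, not the real code).
# fused_path stands in for the MC2 sequence-parallel kernels; fallback_path stands
# in for self.linear(...) followed by ReduceScatterOp.apply(...) / AllGatherOp.apply(...).

MC2FusedLinear = object()  # pretend the fused MC2 kernel module imported successfully


def fallback_path(x):
    # plain matmul + separate communication op: slower but deterministic
    return f"fallback({x})"


def fused_path(x):
    # fused matmul + communication in one MC2 kernel
    return f"mc2_fused({x})"


def forward(x):
    # Before this commit:  if MC2FusedLinear is None:
    # After this commit:   if True or MC2FusedLinear is None:
    # `True or ...` makes the condition unconditionally true, so the fallback
    # branch is always taken regardless of whether the fused kernel exists.
    if True or MC2FusedLinear is None:
        return fallback_path(x)
    return fused_path(x)


print(forward("activations"))  # always prints the fallback result

Keeping the original `... is None` check in place, rather than deleting the branch outright, makes the workaround easy to revert later by removing just the `True or` prefix.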
