diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst
index e9c138a2c12cc..c02f23ac60d09 100644
--- a/docs/source/common/lightning_module.rst
+++ b/docs/source/common/lightning_module.rst
@@ -946,7 +946,7 @@ When set to ``False``, Lightning does not automate the optimization process. Thi
         opt = self.optimizers(use_pl_optimizer=True)
 
         loss = ...
-        self.manual_backward(loss, opt)
+        self.manual_backward(loss)
         opt.step()
         opt.zero_grad()
 
@@ -961,16 +961,16 @@ In the multi-optimizer case, ignore the ``optimizer_idx`` argument and use the o
 
     def training_step(self, batch, batch_idx, optimizer_idx):
         # access your optimizers with use_pl_optimizer=False. Default is True
-        (opt_a, opt_b) = self.optimizers(use_pl_optimizer=True)
+        opt_a, opt_b = self.optimizers(use_pl_optimizer=True)
 
         gen_loss = ...
         opt_a.zero_grad()
-        self.manual_backward(gen_loss, opt_a)
+        self.manual_backward(gen_loss)
         opt_a.step()
 
         disc_loss = ...
         opt_b.zero_grad()
-        self.manual_backward(disc_loss, opt_b)
+        self.manual_backward(disc_loss)
         opt_b.step()
 
 --------------
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 5dce0caa59720..52bcc213692ac 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -1211,10 +1211,10 @@ def manual_backward(self, loss: Tensor, optimizer: Optional[Optimizer] = None, *
         Example::
 
             def training_step(...):
-                (opt_a, opt_b) = self.optimizers()
+                opt_a, opt_b = self.optimizers()
                 loss = ...
                 # automatically applies scaling, etc...
-                self.manual_backward(loss, opt_a)
+                self.manual_backward(loss)
                 opt_a.step()
         """
         if optimizer is not None:
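
For context, the patch above drops the optimizer argument from manual_backward calls in the docs and in the docstring example. Below is a minimal sketch of the resulting calling convention in a complete module. It is illustrative only and not part of the patch: it assumes a Lightning version where automatic optimization can be disabled with self.automatic_optimization = False, and the module name, layer sizes, loss, and learning rate are made up for the example.

import torch
from torch import nn
import pytorch_lightning as pl


class ManualOptExample(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # manual_backward / opt.step() are only used when automatic optimization is off
        self.automatic_optimization = False
        self.layer = nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        # returns the (wrapped) optimizer(s) defined in configure_optimizers
        opt = self.optimizers(use_pl_optimizer=True)

        x, y = batch
        loss = nn.functional.cross_entropy(self.layer(x), y)

        opt.zero_grad()
        # with this patch, only the loss is passed; the optimizer argument is gone
        self.manual_backward(loss)
        opt.step()
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)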