From ecf42db5bcc17b187fa8a56ea27136c9404105c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrian=20W=C3=A4lchli?=
Date: Thu, 29 Jul 2021 15:08:57 +0200
Subject: [PATCH] move copy() as suggested by @carmocca

---
 pytorch_lightning/loops/batch/training_batch_loop.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pytorch_lightning/loops/batch/training_batch_loop.py b/pytorch_lightning/loops/batch/training_batch_loop.py
index 360313546485d..4859b1060c7c8 100644
--- a/pytorch_lightning/loops/batch/training_batch_loop.py
+++ b/pytorch_lightning/loops/batch/training_batch_loop.py
@@ -144,12 +144,12 @@ def advance(self, batch, batch_idx, dataloader_idx):
 
                 result = self._run_optimization(batch_idx, split_batch, opt_idx, optimizer)
                 if result:
-                    self.batch_outputs[opt_idx].append(result.training_step_output)
+                    self.batch_outputs[opt_idx].append(copy(result.training_step_output))
         else:
             # in manual optimization, there is no looping over optimizers
             result = self._run_optimization(batch_idx, split_batch)
             if result:
-                self.batch_outputs[0].append(result.training_step_output)
+                self.batch_outputs[0].append(copy(result.training_step_output))
 
     def teardown(self) -> None:
         # release memory
@@ -319,7 +319,7 @@ def _training_step(
         closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
         # the loss will get scaled for amp. avoid any modifications to it
         loss = closure_loss.detach().clone()
-        return AttributeDict(closure_loss=closure_loss, loss=loss, training_step_output=copy(training_step_output))
+        return AttributeDict(closure_loss=closure_loss, loss=loss, training_step_output=training_step_output)
 
     def _process_training_step_output(self, training_step_output: STEP_OUTPUT) -> Optional[ResultCollection]:
         """Adds the :param:`training_step_output` to the trainer's results
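
Note (not part of the patch): the diff moves the shallow copy from the point where the step result is built to the point where it is stored in batch_outputs. Below is a minimal standalone sketch, not the Lightning code itself, of the effect of copying at append time: the object handed back to the optimization path stays the original, while the stored per-batch output is a snapshot that later attribute reassignment on the original does not change. All names (StepResult, run_step, batch_outputs) are illustrative, and note that copy() is shallow, so in-place mutation of shared containers inside the object would still be visible in the snapshot.

    from copy import copy

    class StepResult:
        """Illustrative stand-in for a training step's result object."""

        def __init__(self, loss: float, extras: dict):
            self.loss = loss
            self.extras = extras

    def run_step(step_idx: int) -> StepResult:
        # Hypothetical per-step result
        return StepResult(loss=float(step_idx), extras={"step": step_idx})

    batch_outputs = []

    for i in range(2):
        result = run_step(i)
        # Store a shallow copy so that rebinding attributes on `result`
        # afterwards does not alter what was recorded for this batch.
        batch_outputs.append(copy(result))
        # Downstream code may still rework the original result object.
        result.loss = result.loss * 0.5

    print([r.loss for r in batch_outputs])  # [0.0, 1.0] -- stored snapshots keep the original losses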