From 2d9f6507d6e46a1f94b7cee865f4e66ed0a80d65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 22 Jun 2021 12:07:53 +0200 Subject: [PATCH 01/12] rename training_loop -> epoch_loop --- pytorch_lightning/core/lightning.py | 4 +- pytorch_lightning/core/optimizer.py | 2 +- pytorch_lightning/loops/fit_loop.py | 45 ++++++++++--------- tests/deprecated_api/test_remove_1-5.py | 2 +- .../loops/test_evaluation_loop_flow.py | 8 ++-- .../loops/test_training_loop_flow_scalar.py | 12 ++--- tests/trainer/test_trainer.py | 16 +++---- 7 files changed, 45 insertions(+), 44 deletions(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index fb0b19899561d..e7c9852968b36 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -1371,7 +1371,7 @@ def training_step(...): # backward self._running_manual_backward = True - self.trainer.fit_loop.training_loop.batch_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs) + self.trainer.fit_loop.epoch_loop.batch_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs) self._running_manual_backward = False def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None: @@ -1471,7 +1471,7 @@ def optimizer_step( If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter to ``optimizer.step()`` function as shown in the examples. This ensures that ``training_step()``, ``optimizer.zero_grad()``, ``backward()`` are called within - :meth:`~pytorch_lightning.trainer.fit_loop.training_loop.batch_loop.TrainingBatchLoop.advance`. + :meth:`~pytorch_lightning.loops.training_batch_loop.TrainingBatchLoop.advance`. Args: epoch: Current epoch diff --git a/pytorch_lightning/core/optimizer.py b/pytorch_lightning/core/optimizer.py index 1da8a7af36221..3572a79b9bd84 100644 --- a/pytorch_lightning/core/optimizer.py +++ b/pytorch_lightning/core/optimizer.py @@ -120,7 +120,7 @@ def toggle_model(self, sync_grad: bool = True): during the accumulation phase. Setting `sync_grad` to False will block this synchronization and improve performance.
""" - with self._trainer.fit_loop.training_loop.batch_loop.block_ddp_sync_behaviour(not sync_grad): + with self._trainer.fit_loop.epoch_loop.batch_loop.block_ddp_sync_behaviour(not sync_grad): self._toggle_model() yield self._untoggle_model() diff --git a/pytorch_lightning/loops/fit_loop.py b/pytorch_lightning/loops/fit_loop.py index c7c2585feb129..a0f9aed4068d3 100644 --- a/pytorch_lightning/loops/fit_loop.py +++ b/pytorch_lightning/loops/fit_loop.py @@ -51,8 +51,9 @@ def __init__( super().__init__() self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs - self.training_loop = TrainingEpochLoop(min_steps, max_steps) + self.epoch_loop = TrainingEpochLoop(min_steps, max_steps) self.validation_loop = EvaluationDataLoaderLoop() + self.results = ResultCollection(training=True) @property def results(self) -> ResultCollection: @@ -75,59 +76,59 @@ def current_epoch(self, value: int) -> None: @property def global_step(self) -> int: """Returns the global step""" - return self.training_loop.global_step + return self.epoch_loop.global_step @global_step.setter def global_step(self, value: int) -> None: - """Sets the global step (forwards to training_loop)""" - self.training_loop.global_step = value + """Sets the global step (forwards to epoch_loop)""" + self.epoch_loop.global_step = value @property def total_batch_idx(self) -> int: """Returns the total number of batches already run (across all epochs)""" - return self.training_loop.total_batch_idx + return self.epoch_loop.total_batch_idx @property def batch_idx(self) -> int: """Returns the number of batches already run within this epoch""" - return self.training_loop.iteration_count + return self.epoch_loop.iteration_count @property def split_idx(self) -> int: """Returns the index of the current batch split (within the current batch) for bptt""" - return self.training_loop.split_idx + return self.epoch_loop.split_idx @property def min_steps(self) -> int: # TODO(@justusschock): Why aren't we using the attribute in this class? """Returns the minimum numnber of steps to run""" - return self.training_loop.min_steps + return self.epoch_loop.min_steps @property def max_steps(self) -> int: """Returns the maximum number of steps to run""" - return self.training_loop.max_steps + return self.epoch_loop.max_steps @max_steps.setter def max_steps(self, value: int) -> None: - """Sets the maximum number of steps (forwards to training_loop)""" + """Sets the maximum number of steps (forwards to epoch_loop)""" # TODO(@awaelchli): This setter is required by debugging connector (fast dev run), should be avoided - self.training_loop.max_steps = value + self.epoch_loop.max_steps = value @property def running_loss(self) -> TensorRunningAccum: """Returns the running loss""" - return self.training_loop.batch_loop.running_loss + return self.epoch_loop.batch_loop.running_loss @property def _skip_backward(self) -> bool: """ Determines whether the loop will skip backward during automatic optimization. """ - return self.training_loop.batch_loop._skip_backward + return self.epoch_loop.batch_loop._skip_backward @_skip_backward.setter def _skip_backward(self, value: bool) -> None: """ Determines whether the loop will skip backward during automatic optimization. 
""" - self.training_loop.batch_loop._skip_backward = value + self.epoch_loop.batch_loop._skip_backward = value @property def done(self) -> bool: @@ -165,7 +166,7 @@ def skip(self) -> bool: def connect(self, trainer: 'pl.Trainer', *args: Any, **kwargs: Any) -> None: """Connects the loop with necessary arguments like the trainer""" super().connect(trainer, *args, **kwargs) - self.training_loop.connect(trainer) + self.epoch_loop.connect(trainer) self.validation_loop.connect(trainer) def reset(self) -> None: @@ -193,7 +194,7 @@ def on_advance_start(self) -> None: self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module) # stores accumulated grad fractions per batch - self.training_loop.batch_loop.accumulated_loss = TensorRunningAccum( + self.epoch_loop.batch_loop.accumulated_loss = TensorRunningAccum( window_length=self.trainer.accumulate_grad_batches ) @@ -204,7 +205,7 @@ def advance(self) -> None: with self.trainer.profiler.profile("run_training_epoch"): # run train epoch - epoch_output = self.training_loop.run(train_dataloader) + epoch_output = self.epoch_loop.run(train_dataloader) if epoch_output is None: return @@ -220,10 +221,10 @@ def advance(self) -> None: def on_advance_end(self) -> None: """Updates the LR schedulers and does some internal bookkeeping""" - if self.training_loop.batches_seen == 0: + if self.epoch_loop.batches_seen == 0: return - self.training_loop.update_lr_schedulers('epoch', update_plateau_schedulers=True) + self.epoch_loop.update_lr_schedulers('epoch', update_plateau_schedulers=True) did_train_only = self.trainer.disable_validation or self.trainer.evaluation_loop.skip if did_train_only: @@ -241,10 +242,10 @@ def on_run_end(self) -> None: # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates # when a checkpoint was saved at the last step - self.training_loop.global_step -= 1 + self.epoch_loop.global_step -= 1 # TODO: see discussion/rework https://github.com/PyTorchLightning/pytorch-lightning/issues/7406 self._check_checkpoint_callback(should_update=True, is_last=True) - self.training_loop.global_step += 1 + self.epoch_loop.global_step += 1 # hook self.trainer.call_hook("on_train_end") @@ -266,7 +267,7 @@ def on_run_end(self) -> None: def should_accumulate(self) -> bool: """Whether the gradients should be accumulated""" - return self.training_loop.batch_loop.should_accumulate() + return self.epoch_loop.batch_loop.should_accumulate() def _check_checkpoint_callback(self, should_update: bool, is_last: bool = False): """Checks if checkpointing needs to be done""" diff --git a/tests/deprecated_api/test_remove_1-5.py b/tests/deprecated_api/test_remove_1-5.py index f8595390dd768..70bcc71d0a2a6 100644 --- a/tests/deprecated_api/test_remove_1-5.py +++ b/tests/deprecated_api/test_remove_1-5.py @@ -244,7 +244,7 @@ def on_train_epoch_end(self, outputs): # noqa with pytest.deprecated_call(match="old signature will be removed in v1.5"): trainer.fit(model) - trainer.fit_loop.training_loop._warning_cache.clear() + trainer.fit_loop.epoch_loop._warning_cache.clear() class NewSignature(Callback): diff --git a/tests/trainer/loops/test_evaluation_loop_flow.py b/tests/trainer/loops/test_evaluation_loop_flow.py index c9eb997c98dd6..14cb4ce4ae7f8 100644 --- a/tests/trainer/loops/test_evaluation_loop_flow.py +++ b/tests/trainer/loops/test_evaluation_loop_flow.py @@ -69,7 +69,7 @@ def backward(self, loss, optimizer, optimizer_idx): # simulate training manually trainer.state.stage = RunningStage.TRAINING 
batch_idx, batch = 0, next(iter(model.train_dataloader())) - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) assert out.signal == 0 train_step_out = out.training_step_output @@ -79,7 +79,7 @@ def backward(self, loss, optimizer, optimizer_idx): assert train_step_out.minimize.item() == 171 # make sure the optimizer closure returns the correct things - opt_closure_result = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward( + opt_closure_result = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward( batch, batch_idx, 0, @@ -140,7 +140,7 @@ def backward(self, loss, optimizer, optimizer_idx): trainer.state.stage = RunningStage.TRAINING # make sure training outputs what is expected batch_idx, batch = 0, next(iter(model.train_dataloader())) - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) assert out.signal == 0 train_step_out = out.training_step_output @@ -150,7 +150,7 @@ def backward(self, loss, optimizer, optimizer_idx): assert train_step_out.minimize.item() == 171 # make sure the optimizer closure returns the correct things - opt_closure_result = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward( + opt_closure_result = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward( batch, batch_idx, 0, trainer.optimizers[0], hiddens=None ) assert opt_closure_result['loss'].item() == 171 diff --git a/tests/trainer/loops/test_training_loop_flow_scalar.py b/tests/trainer/loops/test_training_loop_flow_scalar.py index 0e57797a80890..9b438aea45f87 100644 --- a/tests/trainer/loops/test_training_loop_flow_scalar.py +++ b/tests/trainer/loops/test_training_loop_flow_scalar.py @@ -149,7 +149,7 @@ def backward(self, loss, optimizer, optimizer_idx): trainer.state.stage = RunningStage.TRAINING # make sure training outputs what is expected batch_idx, batch = 0, next(iter(model.train_dataloader())) - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) assert out.signal == 0 train_step_out = out.training_step_output @@ -159,7 +159,7 @@ def backward(self, loss, optimizer, optimizer_idx): assert train_step_out.minimize.item() == 171 # make sure the optimizer closure returns the correct things - opt_closure_result = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward( + opt_closure_result = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward( batch, batch_idx, 0, @@ -227,7 +227,7 @@ def backward(self, loss, optimizer, optimizer_idx): trainer.state.stage = RunningStage.TRAINING # make sure training outputs what is expected batch_idx, batch = 0, next(iter(model.train_dataloader())) - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) assert out.signal == 0 train_step_out = out.training_step_output @@ -237,7 +237,7 @@ def backward(self, loss, optimizer, optimizer_idx): assert train_step_out.minimize.item() == 171 # make sure the optimizer closure returns the correct things - opt_closure_result = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward( + opt_closure_result = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward( batch, batch_idx, 0, trainer.optimizers[0], hiddens=None ) assert opt_closure_result['loss'].item() == 171 
@@ -313,7 +313,7 @@ def training_step(self, batch, batch_idx): # manually check a few batches for batch_idx, batch in enumerate(model.train_dataloader()): - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) if not batch_idx % 2: assert out.training_step_output == [[]] assert out.signal == 0 @@ -358,7 +358,7 @@ def train_dataloader(self): # manually check a few batches for batch_idx, batch in enumerate(model.train_dataloader()): - out = trainer.fit_loop.training_loop.batch_loop.run(batch, batch_idx, 0) + out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx, 0) if not batch_idx % 2: assert out.training_step_output == [[]] assert out.signal == 0 diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index f75ed3ac340f4..7d29376efbb0b 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -926,7 +926,7 @@ def test_gradient_clipping(tmpdir): default_root_dir=tmpdir, ) - old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward + old_training_step_and_backward = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens): """ @@ -940,7 +940,7 @@ def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hidde return ret_val - trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward + trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward = training_step_and_backward # for the test model.prev_called_batch_idx = 0 @@ -964,7 +964,7 @@ def test_gradient_clipping_by_value(tmpdir): default_root_dir=tmpdir ) - old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward + old_training_step_and_backward = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens): """ @@ -980,7 +980,7 @@ def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hidde return ret_val - trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward + trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward = training_step_and_backward # for the test model.prev_called_batch_idx = 0 @@ -1005,7 +1005,7 @@ def test_gradient_clipping_fp16(tmpdir): default_root_dir=tmpdir, ) - old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward + old_training_step_and_backward = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens): """ @@ -1019,7 +1019,7 @@ def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hidde return ret_val - trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward + trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward = training_step_and_backward model.prev_called_batch_idx = 0 trainer.fit(model) @@ -1044,7 +1044,7 @@ def test_gradient_clipping_by_value_fp16(tmpdir): default_root_dir=tmpdir, ) - old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward + old_training_step_and_backward = trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward def training_step_and_backward(split_batch, batch_idx, opt_idx, 
optimizer, hiddens): """ @@ -1060,7 +1060,7 @@ def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hidde return ret_val - trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward + trainer.fit_loop.epoch_loop.batch_loop.training_step_and_backward = training_step_and_backward model.prev_called_batch_idx = 0 trainer.fit(model) From 03470f16625a9803af44f756d6254c68e9d1635c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 22 Jun 2021 12:11:58 +0200 Subject: [PATCH 02/12] EvaluationDataLoaderLoop -> EvaluationLoop --- CHANGELOG.md | 2 +- pytorch_lightning/loops/__init__.py | 2 +- pytorch_lightning/loops/dataloader/__init__.py | 2 +- .../loops/dataloader/evaluation_dataloader_loop.py | 2 +- pytorch_lightning/trainer/properties.py | 6 +++--- tests/trainer/loops/test_evaluation_loop.py | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0a423d6968f9..d1bfed8ab2ceb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -148,7 +148,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). * Removed the `on_epoch` guard from the "should stop" validation check ([#7701](https://github.com/PyTorchLightning/pytorch-lightning/pull/7701)) * Refactored internal loop interface; added new classes `FitLoop`, `TrainingEpochLoop`, `TrainingBatchLoop` ([#7871](https://github.com/PyTorchLightning/pytorch-lightning/pull/7871)) * Removed `pytorch_lightning/trainer/training_loop.py` ([#7985](https://github.com/PyTorchLightning/pytorch-lightning/pull/7985)) - * Refactored evaluation loop interface; added new classes `DataLoaderLoop`, `EvaluationDataLoaderLoop`, `EvaluationEpochLoop` ([#7990](https://github.com/PyTorchLightning/pytorch-lightning/pull/7990)) + * Refactored evaluation loop interface; added new classes `DataLoaderLoop`, `EvaluationLoop`, `EvaluationEpochLoop` ([#7990](https://github.com/PyTorchLightning/pytorch-lightning/pull/7990)) * Removed `pytorch_lightning/trainer/evaluation_loop.py` ([#8056](https://github.com/PyTorchLightning/pytorch-lightning/pull/8056)) * Restricted public access to several internal functions ([#8024](https://github.com/PyTorchLightning/pytorch-lightning/pull/8024)) * Refactored trainer `_run_*` functions and separate evaluation loops ([#8065](https://github.com/PyTorchLightning/pytorch-lightning/pull/8065)) diff --git a/pytorch_lightning/loops/__init__.py b/pytorch_lightning/loops/__init__.py index f908bd4df05a5..2e56693db6446 100644 --- a/pytorch_lightning/loops/__init__.py +++ b/pytorch_lightning/loops/__init__.py @@ -14,7 +14,7 @@ from pytorch_lightning.loops.base import Loop # noqa: F401 from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop # noqa: F401 from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 from pytorch_lightning.loops.training_batch_loop import TrainingBatchLoop # noqa: F401 from pytorch_lightning.loops.training_epoch_loop import TrainingEpochLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader/__init__.py b/pytorch_lightning/loops/dataloader/__init__.py index 47da26d0ba5a5..c77711f2f5c86 100644 --- a/pytorch_lightning/loops/dataloader/__init__.py +++ b/pytorch_lightning/loops/dataloader/__init__.py @@ -13,4 +13,4 @@ # limitations under 
the License. from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py b/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py index e5565d6a8912b..ef94b89a11f4b 100644 --- a/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py +++ b/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py @@ -26,7 +26,7 @@ from pytorch_lightning.utilities.types import EPOCH_OUTPUT -class EvaluationDataLoaderLoop(DataLoaderLoop): +class EvaluationLoop(DataLoaderLoop): """Loops over all dataloaders for evaluation.""" def __init__(self): diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index 1fd82b7c3e28c..2ddd43c789771 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -29,7 +29,7 @@ from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.loggers.tensorboard import TensorBoardLogger -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop +from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop from pytorch_lightning.loops.fit_loop import FitLoop from pytorch_lightning.plugins import ParallelPlugin, PrecisionPlugin, TrainingTypePlugin from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector @@ -524,10 +524,10 @@ def min_steps(self) -> Optional[int]: @property def is_last_batch(self) -> bool: - return self.fit_loop.training_loop.is_last_batch + return self.fit_loop.epoch_loop.is_last_batch @property - def _active_loop(self) -> Optional[Union[FitLoop, EvaluationDataLoaderLoop]]: + def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop]]: if self.training: return self.fit_loop elif self.sanity_checking or self.evaluating: diff --git a/tests/trainer/loops/test_evaluation_loop.py b/tests/trainer/loops/test_evaluation_loop.py index 0d7584628b933..8f3cbaaa3cf00 100644 --- a/tests/trainer/loops/test_evaluation_loop.py +++ b/tests/trainer/loops/test_evaluation_loop.py @@ -22,7 +22,7 @@ @mock.patch( - "pytorch_lightning.loops.dataloader.evaluation_dataloader_loop.EvaluationDataLoaderLoop.on_evaluation_epoch_end" + "pytorch_lightning.loops.dataloader.evaluation_dataloader_loop.EvaluationLoop.on_evaluation_epoch_end" ) def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmpdir): """ From 20d835eecef869aca289170750d062710e7a107e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 22 Jun 2021 12:17:27 +0200 Subject: [PATCH 03/12] proposed file renames --- pytorch_lightning/loops/__init__.py | 8 ++++---- pytorch_lightning/loops/batch/__init__.py | 0 .../loops/{ => batch}/training_batch_loop.py | 0 pytorch_lightning/loops/dataloader/__init__.py | 16 ---------------- .../loops/{dataloader => }/dataloader_loop.py | 0 pytorch_lightning/loops/epoch/__init__.py | 0 .../loops/{ => epoch}/evaluation_epoch_loop.py | 0 .../loops/{ => epoch}/training_epoch_loop.py | 2 +- ...ion_dataloader_loop.py => evaluation_loop.py} | 4 ++-- pytorch_lightning/loops/fit_loop.py | 2 +- pytorch_lightning/trainer/properties.py | 2 +- 11 files changed, 9 insertions(+), 25
deletions(-) create mode 100644 pytorch_lightning/loops/batch/__init__.py rename pytorch_lightning/loops/{ => batch}/training_batch_loop.py (100%) delete mode 100644 pytorch_lightning/loops/dataloader/__init__.py rename pytorch_lightning/loops/{dataloader => }/dataloader_loop.py (100%) create mode 100644 pytorch_lightning/loops/epoch/__init__.py rename pytorch_lightning/loops/{ => epoch}/evaluation_epoch_loop.py (100%) rename pytorch_lightning/loops/{ => epoch}/training_epoch_loop.py (99%) rename pytorch_lightning/loops/{dataloader/evaluation_dataloader_loop.py => evaluation_loop.py} (98%) diff --git a/pytorch_lightning/loops/__init__.py b/pytorch_lightning/loops/__init__.py index 2e56693db6446..06b566f9fdae4 100644 --- a/pytorch_lightning/loops/__init__.py +++ b/pytorch_lightning/loops/__init__.py @@ -13,8 +13,8 @@ # limitations under the License. from pytorch_lightning.loops.base import Loop # noqa: F401 -from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop # noqa: F401 +from pytorch_lightning.loops.dataloader_loop import DataLoaderLoop # noqa: F401 +from pytorch_lightning.loops.evaluation_loop import EvaluationLoop # noqa: F401 from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 -from pytorch_lightning.loops.training_batch_loop import TrainingBatchLoop # noqa: F401 -from pytorch_lightning.loops.training_epoch_loop import TrainingEpochLoop # noqa: F401 +from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop # noqa: F401 +from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 diff --git a/pytorch_lightning/loops/batch/__init__.py b/pytorch_lightning/loops/batch/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pytorch_lightning/loops/training_batch_loop.py b/pytorch_lightning/loops/batch/training_batch_loop.py similarity index 100% rename from pytorch_lightning/loops/training_batch_loop.py rename to pytorch_lightning/loops/batch/training_batch_loop.py diff --git a/pytorch_lightning/loops/dataloader/__init__.py b/pytorch_lightning/loops/dataloader/__init__.py deleted file mode 100644 index c77711f2f5c86..0000000000000 --- a/pytorch_lightning/loops/dataloader/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader/dataloader_loop.py b/pytorch_lightning/loops/dataloader_loop.py similarity index 100% rename from pytorch_lightning/loops/dataloader/dataloader_loop.py rename to pytorch_lightning/loops/dataloader_loop.py diff --git a/pytorch_lightning/loops/epoch/__init__.py b/pytorch_lightning/loops/epoch/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pytorch_lightning/loops/evaluation_epoch_loop.py b/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py similarity index 100% rename from pytorch_lightning/loops/evaluation_epoch_loop.py rename to pytorch_lightning/loops/epoch/evaluation_epoch_loop.py diff --git a/pytorch_lightning/loops/training_epoch_loop.py b/pytorch_lightning/loops/epoch/training_epoch_loop.py similarity index 99% rename from pytorch_lightning/loops/training_epoch_loop.py rename to pytorch_lightning/loops/epoch/training_epoch_loop.py index a82f4b72e070b..f40276c3c535e 100644 --- a/pytorch_lightning/loops/training_epoch_loop.py +++ b/pytorch_lightning/loops/epoch/training_epoch_loop.py @@ -18,7 +18,7 @@ import pytorch_lightning as pl from pytorch_lightning.loops.base import Loop -from pytorch_lightning.loops.training_batch_loop import TrainingBatchLoop +from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.model_helpers import is_overridden diff --git a/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py b/pytorch_lightning/loops/evaluation_loop.py similarity index 98% rename from pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py rename to pytorch_lightning/loops/evaluation_loop.py index ef94b89a11f4b..e4a71b9d4607b 100644 --- a/pytorch_lightning/loops/dataloader/evaluation_dataloader_loop.py +++ b/pytorch_lightning/loops/evaluation_loop.py @@ -18,8 +18,8 @@ from torch.utils.data.dataloader import DataLoader import pytorch_lightning as pl -from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop -from pytorch_lightning.loops.evaluation_epoch_loop import EvaluationEpochLoop +from pytorch_lightning.loops.dataloader_loop import DataLoaderLoop +from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities.model_helpers import is_overridden diff --git a/pytorch_lightning/loops/fit_loop.py b/pytorch_lightning/loops/fit_loop.py index a0f9aed4068d3..ec07b0a21dffe 100644 --- a/pytorch_lightning/loops/fit_loop.py +++ b/pytorch_lightning/loops/fit_loop.py @@ -19,7 +19,7 @@ import pytorch_lightning as pl from pytorch_lightning.loops.base import Loop from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop -from pytorch_lightning.loops.training_epoch_loop import TrainingEpochLoop +from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection from pytorch_lightning.trainer.supporters import TensorRunningAccum 
from pytorch_lightning.utilities import rank_zero_info diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index 2ddd43c789771..ee2ad3c00eaf6 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -29,7 +29,7 @@ from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.loggers.tensorboard import TensorBoardLogger -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationLoop +from pytorch_lightning.loops.evaluation_loop import EvaluationLoop from pytorch_lightning.loops.fit_loop import FitLoop from pytorch_lightning.plugins import ParallelPlugin, PrecisionPlugin, TrainingTypePlugin from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector From bb8a4de1e4952a19aa8d01ac9ad15c4bd6741bec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 22 Jun 2021 12:40:08 +0200 Subject: [PATCH 04/12] imports --- pytorch_lightning/loops/__init__.py | 6 +++--- pytorch_lightning/loops/batch/__init__.py | 15 +++++++++++++++ pytorch_lightning/loops/dataloader/__init__.py | 16 ++++++++++++++++ .../loops/{ => dataloader}/dataloader_loop.py | 0 .../loops/{ => dataloader}/evaluation_loop.py | 2 +- pytorch_lightning/loops/epoch/__init__.py | 16 ++++++++++++++++ pytorch_lightning/trainer/properties.py | 2 +- 7 files changed, 52 insertions(+), 5 deletions(-) create mode 100644 pytorch_lightning/loops/dataloader/__init__.py rename pytorch_lightning/loops/{ => dataloader}/dataloader_loop.py (100%) rename pytorch_lightning/loops/{ => dataloader}/evaluation_loop.py (99%) diff --git a/pytorch_lightning/loops/__init__.py b/pytorch_lightning/loops/__init__.py index 06b566f9fdae4..77ba43b5705a9 100644 --- a/pytorch_lightning/loops/__init__.py +++ b/pytorch_lightning/loops/__init__.py @@ -13,8 +13,8 @@ # limitations under the License. from pytorch_lightning.loops.base import Loop # noqa: F401 -from pytorch_lightning.loops.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.evaluation_loop import EvaluationLoop # noqa: F401 -from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop # noqa: F401 from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 +from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 diff --git a/pytorch_lightning/loops/batch/__init__.py b/pytorch_lightning/loops/batch/__init__.py index e69de29bb2d1d..6e6522165404a 100644 --- a/pytorch_lightning/loops/batch/__init__.py +++ b/pytorch_lightning/loops/batch/__init__.py @@ -0,0 +1,15 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader/__init__.py b/pytorch_lightning/loops/dataloader/__init__.py new file mode 100644 index 0000000000000..437ddc7c75e9e --- /dev/null +++ b/pytorch_lightning/loops/dataloader/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader_loop.py b/pytorch_lightning/loops/dataloader/dataloader_loop.py similarity index 100% rename from pytorch_lightning/loops/dataloader_loop.py rename to pytorch_lightning/loops/dataloader/dataloader_loop.py diff --git a/pytorch_lightning/loops/evaluation_loop.py b/pytorch_lightning/loops/dataloader/evaluation_loop.py similarity index 99% rename from pytorch_lightning/loops/evaluation_loop.py rename to pytorch_lightning/loops/dataloader/evaluation_loop.py index e4a71b9d4607b..c01cbe55d72e4 100644 --- a/pytorch_lightning/loops/evaluation_loop.py +++ b/pytorch_lightning/loops/dataloader/evaluation_loop.py @@ -18,7 +18,7 @@ from torch.utils.data.dataloader import DataLoader import pytorch_lightning as pl -from pytorch_lightning.loops.dataloader_loop import DataLoaderLoop +from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection from pytorch_lightning.trainer.states import TrainerFn diff --git a/pytorch_lightning/loops/epoch/__init__.py b/pytorch_lightning/loops/epoch/__init__.py index e69de29bb2d1d..08d0c6a63c342 100644 --- a/pytorch_lightning/loops/epoch/__init__.py +++ b/pytorch_lightning/loops/epoch/__init__.py @@ -0,0 +1,16 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop # noqa: F401 +from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index ee2ad3c00eaf6..33082d8e92e05 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -29,7 +29,7 @@ from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.loggers.tensorboard import TensorBoardLogger -from pytorch_lightning.loops.evaluation_loop import EvaluationLoop +from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop from pytorch_lightning.loops.fit_loop import FitLoop from pytorch_lightning.plugins import ParallelPlugin, PrecisionPlugin, TrainingTypePlugin from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector From a23eb529e82aacd7c4902bad24f4ba8f0b9eb592 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Jun 2021 10:46:38 +0000 Subject: [PATCH 05/12] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/trainer/loops/test_evaluation_loop.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/trainer/loops/test_evaluation_loop.py b/tests/trainer/loops/test_evaluation_loop.py index 8f3cbaaa3cf00..62740d8a27d5c 100644 --- a/tests/trainer/loops/test_evaluation_loop.py +++ b/tests/trainer/loops/test_evaluation_loop.py @@ -21,9 +21,7 @@ from tests.helpers.runif import RunIf -@mock.patch( - "pytorch_lightning.loops.dataloader.evaluation_dataloader_loop.EvaluationLoop.on_evaluation_epoch_end" -) +@mock.patch("pytorch_lightning.loops.dataloader.evaluation_dataloader_loop.EvaluationLoop.on_evaluation_epoch_end") def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmpdir): """ Tests that `on_evaluation_epoch_end` is called From 7fa3f727d191a58bcd8e1c7c928f7d9c395898cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 11:42:54 +0200 Subject: [PATCH 06/12] bad merge --- pytorch_lightning/loops/fit_loop.py | 7 +++---- .../connectors/logger_connector/logger_connector.py | 2 +- pytorch_lightning/trainer/properties.py | 6 +++--- pytorch_lightning/trainer/trainer.py | 6 +++--- tests/trainer/loops/test_evaluation_loop.py | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/pytorch_lightning/loops/fit_loop.py b/pytorch_lightning/loops/fit_loop.py index ec07b0a21dffe..8fda6bde5d9cc 100644 --- a/pytorch_lightning/loops/fit_loop.py +++ b/pytorch_lightning/loops/fit_loop.py @@ -18,7 +18,7 @@ import pytorch_lightning as pl from pytorch_lightning.loops.base import Loop -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop +from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection from pytorch_lightning.trainer.supporters import TensorRunningAccum @@ -52,13 +52,12 @@ def __init__( self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs self.epoch_loop = TrainingEpochLoop(min_steps, 
max_steps) - self.validation_loop = EvaluationDataLoaderLoop() - self.results = ResultCollection(training=True) + self.validation_loop = EvaluationLoop() @property def results(self) -> ResultCollection: if self.trainer.training: - return self.training_loop.results + return self.epoch_loop.results elif self.trainer.validating: return self.validation_loop.results raise RuntimeError("`FitLoop.results` property isn't defined. Accessed outside of scope") diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 25526a829c0a8..27407fb98c159 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -299,7 +299,7 @@ def progress_bar_metrics(self) -> Dict[str, float]: return self._progress_bar_metrics def teardown(self): - self.trainer.fit_loop.training_loop._results.cpu() + self.trainer.fit_loop.epoch_loop._results.cpu() self.trainer.fit_loop.validation_loop._results.cpu() self.trainer.validation_loop._results.cpu() self.trainer.test_loop._results.cpu() diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index 33082d8e92e05..5becc9d78c2aa 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -63,8 +63,8 @@ class TrainerProperties(ABC): logger_connector: LoggerConnector state: TrainerState fit_loop: FitLoop - validation_loop: EvaluationDataLoaderLoop - test_loop: EvaluationDataLoaderLoop + validation_loop: EvaluationLoop + test_loop: EvaluationLoop """ Accelerator properties """ @@ -489,7 +489,7 @@ def sanity_checking(self, val: bool) -> None: """ @property - def evaluation_loop(self) -> EvaluationDataLoaderLoop: + def evaluation_loop(self) -> EvaluationLoop: if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING): return self.fit_loop.validation_loop elif self.state.fn == TrainerFn.VALIDATING: diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index dd201b49e427b..759906b89deee 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -27,7 +27,7 @@ from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning.core.memory import ModelSummary from pytorch_lightning.loggers import LightningLoggerBase -from pytorch_lightning.loops.dataloader.evaluation_dataloader_loop import EvaluationDataLoaderLoop +from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop from pytorch_lightning.loops.dataloader.prediction_dataloader_loop import PredictionDataLoaderLoop from pytorch_lightning.loops.fit_loop import FitLoop from pytorch_lightning.plugins import Plugin @@ -343,8 +343,8 @@ def __init__( self.tuner = Tuner(self) self.fit_loop = FitLoop(min_epochs, max_epochs, min_steps, max_steps) - self.validation_loop = EvaluationDataLoaderLoop() - self.test_loop = EvaluationDataLoaderLoop() + self.validation_loop = EvaluationLoop() + self.test_loop = EvaluationLoop() self.predict_loop = PredictionDataLoaderLoop() self.fit_loop.connect(self) self.validation_loop.connect(self) diff --git a/tests/trainer/loops/test_evaluation_loop.py b/tests/trainer/loops/test_evaluation_loop.py index 62740d8a27d5c..2a0f95a19209b 100644 --- a/tests/trainer/loops/test_evaluation_loop.py +++ b/tests/trainer/loops/test_evaluation_loop.py @@ -21,7 +21,7 @@ from tests.helpers.runif import RunIf 
-@mock.patch("pytorch_lightning.loops.dataloader.evaluation_dataloader_loop.EvaluationLoop.on_evaluation_epoch_end") +@mock.patch("pytorch_lightning.loops.dataloader.evaluation_loop.EvaluationLoop.on_evaluation_epoch_end") def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmpdir): """ Tests that `on_evaluation_epoch_end` is called From 4657935b7c93d4997699e83720787d4e33204eb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 11:46:29 +0200 Subject: [PATCH 07/12] prediction loop renaming --- .../{prediction_dataloader_loop.py => prediction_loop.py} | 4 ++-- pytorch_lightning/loops/{ => epoch}/prediction_epoch_loop.py | 0 pytorch_lightning/trainer/trainer.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename pytorch_lightning/loops/dataloader/{prediction_dataloader_loop.py => prediction_loop.py} (97%) rename pytorch_lightning/loops/{ => epoch}/prediction_epoch_loop.py (100%) diff --git a/pytorch_lightning/loops/dataloader/prediction_dataloader_loop.py b/pytorch_lightning/loops/dataloader/prediction_loop.py similarity index 97% rename from pytorch_lightning/loops/dataloader/prediction_dataloader_loop.py rename to pytorch_lightning/loops/dataloader/prediction_loop.py index 80077e1e2aaae..542f94fdb087e 100644 --- a/pytorch_lightning/loops/dataloader/prediction_dataloader_loop.py +++ b/pytorch_lightning/loops/dataloader/prediction_loop.py @@ -5,13 +5,13 @@ import pytorch_lightning as pl from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop -from pytorch_lightning.loops.prediction_epoch_loop import PredictionEpochLoop +from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop from pytorch_lightning.plugins import DDPSpawnPlugin from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.types import _PREDICT_OUTPUT -class PredictionDataLoaderLoop(DataLoaderLoop): +class PredictionLoop(DataLoaderLoop): """Loop to run over dataloaders for prediction""" def __init__(self): diff --git a/pytorch_lightning/loops/prediction_epoch_loop.py b/pytorch_lightning/loops/epoch/prediction_epoch_loop.py similarity index 100% rename from pytorch_lightning/loops/prediction_epoch_loop.py rename to pytorch_lightning/loops/epoch/prediction_epoch_loop.py diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 759906b89deee..c5ee90cd126ce 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -28,7 +28,7 @@ from pytorch_lightning.core.memory import ModelSummary from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop -from pytorch_lightning.loops.dataloader.prediction_dataloader_loop import PredictionDataLoaderLoop +from pytorch_lightning.loops.dataloader.prediction_loop import PredictionLoop from pytorch_lightning.loops.fit_loop import FitLoop from pytorch_lightning.plugins import Plugin from pytorch_lightning.plugins.environments import ClusterEnvironment @@ -345,7 +345,7 @@ def __init__( self.fit_loop = FitLoop(min_epochs, max_epochs, min_steps, max_steps) self.validation_loop = EvaluationLoop() self.test_loop = EvaluationLoop() - self.predict_loop = PredictionDataLoaderLoop() + self.predict_loop = PredictionLoop() self.fit_loop.connect(self) self.validation_loop.connect(self) self.test_loop.connect(self) From 3b7eaecd471700cbe23137afc8a7cba965e34975 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 11:46:39 +0200 Subject: [PATCH 08/12] update changelog --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1bfed8ab2ceb..2d93c86957d50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -146,13 +146,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). * Simplified "should run validation" logic ([#7682](https://github.com/PyTorchLightning/pytorch-lightning/pull/7682)) * Simplified logic for updating the learning rate for schedulers ([#7682](https://github.com/PyTorchLightning/pytorch-lightning/pull/7682)) * Removed the `on_epoch` guard from the "should stop" validation check ([#7701](https://github.com/PyTorchLightning/pytorch-lightning/pull/7701)) - * Refactored internal loop interface; added new classes `FitLoop`, `TrainingEpochLoop`, `TrainingBatchLoop` ([#7871](https://github.com/PyTorchLightning/pytorch-lightning/pull/7871)) + * Refactored internal loop interface; added new classes `FitLoop`, `TrainingEpochLoop`, `TrainingBatchLoop` ([#7871](https://github.com/PyTorchLightning/pytorch-lightning/pull/7871), [#8077](https://github.com/PyTorchLightning/pytorch-lightning/pull/8077)) * Removed `pytorch_lightning/trainer/training_loop.py` ([#7985](https://github.com/PyTorchLightning/pytorch-lightning/pull/7985)) - * Refactored evaluation loop interface; added new classes `DataLoaderLoop`, `EvaluationLoop`, `EvaluationEpochLoop` ([#7990](https://github.com/PyTorchLightning/pytorch-lightning/pull/7990)) + * Refactored evaluation loop interface; added new classes `DataLoaderLoop`, `EvaluationLoop`, `EvaluationEpochLoop` ([#7990](https://github.com/PyTorchLightning/pytorch-lightning/pull/7990), [#8077](https://github.com/PyTorchLightning/pytorch-lightning/pull/8077)) * Removed `pytorch_lightning/trainer/evaluation_loop.py` ([#8056](https://github.com/PyTorchLightning/pytorch-lightning/pull/8056)) * Restricted public access to several internal functions ([#8024](https://github.com/PyTorchLightning/pytorch-lightning/pull/8024)) * Refactored trainer `_run_*` functions and separate evaluation loops ([#8065](https://github.com/PyTorchLightning/pytorch-lightning/pull/8065)) - * Refactored prediction loop interface; added new classes `PredictionDataLoaderLoop`, `PredictionEpochLoop` ([#7700](https://github.com/PyTorchLightning/pytorch-lightning/pull/7700)) + * Refactored prediction loop interface; added new classes `PredictionLoop`, `PredictionEpochLoop` ([#7700](https://github.com/PyTorchLightning/pytorch-lightning/pull/7700), [#8077](https://github.com/PyTorchLightning/pytorch-lightning/pull/8077)) * Removed `pytorch_lightning/trainer/predict_loop.py` ([#8094](https://github.com/PyTorchLightning/pytorch-lightning/pull/8094)) From 9538c659085160b1e387bbe3e52ef2d68644e41e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 11:58:44 +0200 Subject: [PATCH 09/12] update init files --- pytorch_lightning/loops/__init__.py | 3 +++ pytorch_lightning/loops/dataloader/__init__.py | 1 + pytorch_lightning/loops/epoch/__init__.py | 1 + 3 files changed, 5 insertions(+) diff --git a/pytorch_lightning/loops/__init__.py b/pytorch_lightning/loops/__init__.py index 77ba43b5705a9..fa15a0513ae5d 100644 --- a/pytorch_lightning/loops/__init__.py +++ b/pytorch_lightning/loops/__init__.py @@ -16,5 +16,8 @@ from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop # noqa: F401 from 
pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.prediction_loop import PredictionLoop # noqa: F401 +from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop # noqa: F401 +from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop # noqa: F401 from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 diff --git a/pytorch_lightning/loops/dataloader/__init__.py b/pytorch_lightning/loops/dataloader/__init__.py index 437ddc7c75e9e..db2b2f7926d50 100644 --- a/pytorch_lightning/loops/dataloader/__init__.py +++ b/pytorch_lightning/loops/dataloader/__init__.py @@ -14,3 +14,4 @@ from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop # noqa: F401 +from pytorch_lightning.loops.dataloader.prediction_loop import PredictionLoop # noqa: F401 diff --git a/pytorch_lightning/loops/epoch/__init__.py b/pytorch_lightning/loops/epoch/__init__.py index 08d0c6a63c342..789953937a6b4 100644 --- a/pytorch_lightning/loops/epoch/__init__.py +++ b/pytorch_lightning/loops/epoch/__init__.py @@ -13,4 +13,5 @@ # limitations under the License. from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop # noqa: F401 +from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop # noqa: F401 from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 From 5b1367722b6bd0120d4cbc6a882bb71786deeaf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 15:44:39 +0200 Subject: [PATCH 10/12] fix bad merge --- pytorch_lightning/callbacks/finetuning.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/callbacks/finetuning.py b/pytorch_lightning/callbacks/finetuning.py index a65dce9144fa9..cac4e4c9c857e 100644 --- a/pytorch_lightning/callbacks/finetuning.py +++ b/pytorch_lightning/callbacks/finetuning.py @@ -285,7 +285,7 @@ def _store( def on_train_epoch_start(self, trainer, pl_module): """Called when the epoch begins.""" - for opt_idx, optimizer in trainer.fit_loop.training_loop.batch_loop.get_active_optimizers(): + for opt_idx, optimizer in trainer.fit_loop.epoch_loop.batch_loop.get_active_optimizers(): num_param_groups = len(optimizer.param_groups) self.finetune_function(pl_module, trainer.current_epoch, optimizer, opt_idx) current_param_groups = optimizer.param_groups From 2edb154a94e5bc27b0e6258ac3c8f387797c491c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 23 Jun 2021 23:37:19 +0200 Subject: [PATCH 11/12] glue imports together --- pytorch_lightning/loops/__init__.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/pytorch_lightning/loops/__init__.py b/pytorch_lightning/loops/__init__.py index fa15a0513ae5d..b7eb47167d26f 100644 --- a/pytorch_lightning/loops/__init__.py +++ b/pytorch_lightning/loops/__init__.py @@ -13,11 +13,7 @@ # limitations under the License. 
from pytorch_lightning.loops.base import Loop # noqa: F401 -from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.prediction_loop import PredictionLoop # noqa: F401 -from pytorch_lightning.loops.epoch.evaluation_epoch_loop import EvaluationEpochLoop # noqa: F401 -from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop # noqa: F401 -from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop # noqa: F401 +from pytorch_lightning.loops.batch import TrainingBatchLoop # noqa: F401 +from pytorch_lightning.loops.dataloader import DataLoaderLoop, EvaluationLoop, PredictionLoop # noqa: F401 +from pytorch_lightning.loops.epoch import EvaluationEpochLoop, PredictionEpochLoop, TrainingEpochLoop # noqa: F401 from pytorch_lightning.loops.fit_loop import FitLoop # noqa: F401 From de1297a1184c94ef5012891463e86bea89acd513 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Thu, 24 Jun 2021 15:33:32 +0200 Subject: [PATCH 12/12] rename fit_loop.validation_loop to fit_loop.val_loop --- pytorch_lightning/loops/epoch/training_epoch_loop.py | 4 ++-- pytorch_lightning/loops/fit_loop.py | 6 +++--- .../connectors/logger_connector/logger_connector.py | 2 +- pytorch_lightning/trainer/properties.py | 2 +- tests/trainer/test_trainer.py | 8 ++++---- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pytorch_lightning/loops/epoch/training_epoch_loop.py b/pytorch_lightning/loops/epoch/training_epoch_loop.py index f40276c3c535e..dddffc223e66d 100644 --- a/pytorch_lightning/loops/epoch/training_epoch_loop.py +++ b/pytorch_lightning/loops/epoch/training_epoch_loop.py @@ -175,10 +175,10 @@ def on_advance_end(self): def _run_validation(self): # reload dataloaders - self.trainer.fit_loop.validation_loop.reload_evaluation_dataloaders() + self.trainer.fit_loop.val_loop.reload_evaluation_dataloaders() with torch.no_grad(): - self.trainer.fit_loop.validation_loop.run() + self.trainer.fit_loop.val_loop.run() def on_run_end(self) -> List[List[STEP_OUTPUT]]: """Calls the on_epoch_end hook. diff --git a/pytorch_lightning/loops/fit_loop.py b/pytorch_lightning/loops/fit_loop.py index 8fda6bde5d9cc..4b404fdaf79aa 100644 --- a/pytorch_lightning/loops/fit_loop.py +++ b/pytorch_lightning/loops/fit_loop.py @@ -52,14 +52,14 @@ def __init__( self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs self.epoch_loop = TrainingEpochLoop(min_steps, max_steps) - self.validation_loop = EvaluationLoop() + self.val_loop = EvaluationLoop() @property def results(self) -> ResultCollection: if self.trainer.training: return self.epoch_loop.results elif self.trainer.validating: - return self.validation_loop.results + return self.val_loop.results raise RuntimeError("`FitLoop.results` property isn't defined. 
Accessed outside of scope") @property @@ -166,7 +166,7 @@ def connect(self, trainer: 'pl.Trainer', *args: Any, **kwargs: Any) -> None: """Connects the loop with necessary arguments like the trainer""" super().connect(trainer, *args, **kwargs) self.epoch_loop.connect(trainer) - self.validation_loop.connect(trainer) + self.val_loop.connect(trainer) def reset(self) -> None: """Resets the internal state of this loop""" diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 27407fb98c159..86d33bf1e6402 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -300,6 +300,6 @@ def progress_bar_metrics(self) -> Dict[str, float]: def teardown(self): self.trainer.fit_loop.epoch_loop._results.cpu() - self.trainer.fit_loop.validation_loop._results.cpu() + self.trainer.fit_loop.val_loop._results.cpu() self.trainer.validation_loop._results.cpu() self.trainer.test_loop._results.cpu() diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py index 5becc9d78c2aa..b77b1b8268b9a 100644 --- a/pytorch_lightning/trainer/properties.py +++ b/pytorch_lightning/trainer/properties.py @@ -491,7 +491,7 @@ def sanity_checking(self, val: bool) -> None: @property def evaluation_loop(self) -> EvaluationLoop: if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING): - return self.fit_loop.validation_loop + return self.fit_loop.val_loop elif self.state.fn == TrainerFn.VALIDATING: return self.validation_loop elif self.state.fn == TrainerFn.TESTING: diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 7d29376efbb0b..d91f8d0a427e7 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1101,9 +1101,9 @@ def test_num_sanity_val_steps(tmpdir, limit_val_batches): assert trainer.num_sanity_val_steps == num_sanity_val_steps with patch.object( - trainer.fit_loop.validation_loop.epoch_loop, + trainer.fit_loop.val_loop.epoch_loop, "evaluation_step", - wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step + wraps=trainer.fit_loop.val_loop.epoch_loop.evaluation_step ) as mocked: val_dataloaders = model.val_dataloader__multiple_mixed_length() trainer.fit(model, val_dataloaders=val_dataloaders) @@ -1131,9 +1131,9 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches): assert trainer.num_sanity_val_steps == float("inf") with patch.object( - trainer.fit_loop.validation_loop.epoch_loop, + trainer.fit_loop.val_loop.epoch_loop, "evaluation_step", - wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step + wraps=trainer.fit_loop.val_loop.epoch_loop.evaluation_step ) as mocked: val_dataloaders = model.val_dataloader__multiple() trainer.fit(model, val_dataloaders=val_dataloaders)