diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8bf2ee6664270..71f9cb424884e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Avoid the deprecated `onnx.export(example_outputs=...)` in torch 1.10 ([#11116](https://github.com/PyTorchLightning/pytorch-lightning/pull/11116))
 - Fixed an issue when torch-scripting a `LightningModule` after training with `Trainer(sync_batchnorm=True)` ([#11078](https://github.com/PyTorchLightning/pytorch-lightning/pull/11078))
 - Fixed an `AttributeError` occuring when using a `CombinedLoader` (multiple dataloaders) for prediction ([#11111](https://github.com/PyTorchLightning/pytorch-lightning/pull/11111))
+- Fixed a bug where `Trainer(track_grad_norm=..., logger=False)` would fail ([#11114](https://github.com/PyTorchLightning/pytorch-lightning/pull/11114))


 ### Changed
diff --git a/pytorch_lightning/callbacks/device_stats_monitor.py b/pytorch_lightning/callbacks/device_stats_monitor.py
index b743ed3e1bbeb..016d2015a81e1 100644
--- a/pytorch_lightning/callbacks/device_stats_monitor.py
+++ b/pytorch_lightning/callbacks/device_stats_monitor.py
@@ -59,6 +59,7 @@ def on_train_batch_start(

         device_stats = trainer.accelerator.get_device_stats(pl_module.device)
         prefixed_device_stats = prefix_metrics_keys(device_stats, "on_train_batch_start")
+        assert trainer.logger is not None
         trainer.logger.log_metrics(prefixed_device_stats, step=trainer.global_step)

     def on_train_batch_end(
@@ -75,6 +76,7 @@ def on_train_batch_end(

         device_stats = trainer.accelerator.get_device_stats(pl_module.device)
         prefixed_device_stats = prefix_metrics_keys(device_stats, "on_train_batch_end")
+        assert trainer.logger is not None
         trainer.logger.log_metrics(prefixed_device_stats, step=trainer.global_step)

diff --git a/pytorch_lightning/callbacks/gpu_stats_monitor.py b/pytorch_lightning/callbacks/gpu_stats_monitor.py
index 7ee6771056666..7bb0289050a1e 100644
--- a/pytorch_lightning/callbacks/gpu_stats_monitor.py
+++ b/pytorch_lightning/callbacks/gpu_stats_monitor.py
@@ -161,6 +161,7 @@ def on_train_batch_start(
             # First log at beginning of second step
             logs["batch_time/inter_step (ms)"] = (time.time() - self._snap_inter_step_time) * 1000

+        assert trainer.logger is not None
         trainer.logger.log_metrics(logs, step=trainer.global_step)

     @rank_zero_only
@@ -185,6 +186,7 @@ def on_train_batch_end(
         if self._log_stats.intra_step_time and self._snap_intra_step_time:
             logs["batch_time/intra_step (ms)"] = (time.time() - self._snap_intra_step_time) * 1000

+        assert trainer.logger is not None
         trainer.logger.log_metrics(logs, step=trainer.global_step)

     @staticmethod
diff --git a/pytorch_lightning/plugins/precision/precision_plugin.py b/pytorch_lightning/plugins/precision/precision_plugin.py
index f1ebbf58d8326..012922ea2b60a 100644
--- a/pytorch_lightning/plugins/precision/precision_plugin.py
+++ b/pytorch_lightning/plugins/precision/precision_plugin.py
@@ -165,7 +165,8 @@ def optimizer_step(
     def _track_grad_norm(self, trainer: "pl.Trainer") -> None:
         if trainer.track_grad_norm == -1:
             return
-        grad_norm_dict = grad_norm(trainer.lightning_module, trainer.track_grad_norm, trainer.logger.group_separator)
+        kwargs = {"group_separator": trainer.logger.group_separator} if trainer.logger is not None else {}
+        grad_norm_dict = grad_norm(trainer.lightning_module, trainer.track_grad_norm, **kwargs)
         if grad_norm_dict:
             prev_fx = trainer.lightning_module._current_fx_name
             trainer.lightning_module._current_fx_name = "on_before_optimizer_step"
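The core of the fix is the `_track_grad_norm` change above: `trainer.logger.group_separator` is no longer dereferenced unconditionally, so `grad_norm` falls back to its own default separator when no logger is configured. Below is a minimal standalone sketch of that conditional-kwargs pattern; `_StubLogger` and `_StubTrainer` are hypothetical stand-ins for illustration, not pytorch-lightning classes.

```python
# Standalone sketch of the conditional-kwargs pattern used in _track_grad_norm.
# _StubLogger/_StubTrainer are hypothetical stand-ins, not pytorch_lightning classes.
from typing import Any, Dict, Optional


class _StubLogger:
    group_separator = "/"


class _StubTrainer:
    def __init__(self, track_grad_norm: float, logger: Optional[_StubLogger]) -> None:
        self.track_grad_norm = track_grad_norm
        self.logger = logger


def _grad_norm_kwargs(trainer: _StubTrainer) -> Dict[str, Any]:
    """Build the keyword arguments that would be forwarded to grad_norm."""
    if trainer.track_grad_norm == -1:
        return {}  # norm tracking disabled, nothing to forward
    # Only forward the logger's separator when a logger exists; otherwise let
    # grad_norm use its default instead of raising AttributeError on None.
    return {"group_separator": trainer.logger.group_separator} if trainer.logger is not None else {}


assert _grad_norm_kwargs(_StubTrainer(track_grad_norm=2, logger=None)) == {}
assert _grad_norm_kwargs(_StubTrainer(track_grad_norm=2, logger=_StubLogger())) == {"group_separator": "/"}
```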
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 18be005715aca..a21763259ece2 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -569,6 +569,7 @@ def __init__(
         self.__init_profiler(profiler)

         # init logger flags
+        self.logger: Optional[LightningLoggerBase]
         self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)

         # init debugging flags
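For context, a hedged usage sketch of the configuration named in the changelog entry; the model and the `fit` call are placeholders, only the `Trainer` arguments matter here.

```python
import pytorch_lightning as pl

# With logging disabled, tracking gradient norms previously failed because
# PrecisionPlugin._track_grad_norm read trainer.logger.group_separator on a None logger.
trainer = pl.Trainer(track_grad_norm=2, logger=False, max_epochs=1)
# trainer.fit(MyLightningModule(), train_dataloaders=...)  # MyLightningModule is a placeholder
```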