diff --git a/CHANGELOG.md b/CHANGELOG.md
index cea118de512a5..6bda79444eb43 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,6 +30,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Fixed `num_classes` argument in F1 metric ([#5663](https://github.com/PyTorchLightning/pytorch-lightning/pull/5663))
 
+- Fixed `log_dir` property ([#5537](https://github.com/PyTorchLightning/pytorch-lightning/pull/5537))
+
+
 - Fixed a race condition in `ModelCheckpoint` when checking if a checkpoint file exists ([#5144](https://github.com/PyTorchLightning/pytorch-lightning/pull/5144))
 
 - Remove unnecessary intermediate layers in Dockerfiles ([#5697](https://github.com/PyTorchLightning/pytorch-lightning/pull/5697))
diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
index 3aa186b08c652..6ec444eaa3838 100644
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -194,7 +194,7 @@ def on_pretrain_routine_start(self, trainer, pl_module):
         """
         When pretrain routine starts we build the ckpt dir on the fly
         """
-        self.__resolve_ckpt_dir(trainer, pl_module)
+        self.__resolve_ckpt_dir(trainer)
         self.save_function = trainer.save_checkpoint
 
     def on_validation_end(self, trainer, pl_module):
@@ -448,7 +448,7 @@ def format_checkpoint_name(
         ckpt_name = f"{filename}{self.FILE_EXTENSION}"
         return os.path.join(self.dirpath, ckpt_name) if self.dirpath else ckpt_name
 
-    def __resolve_ckpt_dir(self, trainer, pl_module):
+    def __resolve_ckpt_dir(self, trainer):
         """
         Determines model checkpoint save directory at runtime. References attributes from the
         trainer's logger to determine where to save checkpoints.
diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py
index f92c44ab27b7f..0c3222851e13b 100644
--- a/pytorch_lightning/loggers/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -23,7 +23,8 @@
 import torch.nn as nn
 
 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
-from pytorch_lightning.utilities import rank_zero_only, _module_available
+from pytorch_lightning.utilities import _module_available, rank_zero_only
+from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.warning_utils import WarningCache
 
 _WANDB_AVAILABLE = _module_available("wandb")
@@ -98,6 +99,14 @@ def __init__(
         if wandb is None:
             raise ImportError('You want to use `wandb` logger which is not installed yet,'  # pragma: no-cover
                               ' install it with `pip install wandb`.')
+
+        if offline and log_model:
+            raise MisconfigurationException(
+                f'Providing log_model={log_model} and offline={offline} is an invalid configuration'
+                ' since model checkpoints cannot be uploaded in offline mode.\n'
+                'Hint: Set `offline=False` to log your model.'
+            )
+
         super().__init__()
         self._name = name
         self._save_dir = save_dir
@@ -141,10 +150,12 @@ def experiment(self) -> Run:
             self._experiment = wandb.init(
                 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
                 id=self._id, resume='allow', **self._kwargs) if wandb.run is None else wandb.run
+
         # offset logging step when resuming a run
         self._step_offset = self._experiment.step
+
         # save checkpoints in wandb dir to upload on W&B servers
-        if self._log_model:
+        if self._save_dir is None:
             self._save_dir = self._experiment.dir
         return self._experiment
 
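For context, a minimal usage sketch (not part of the patch) of the two `WandbLogger` behaviour changes above; the project name is illustrative only:

    from pytorch_lightning.loggers import WandbLogger

    # Now rejected at construction time: checkpoints cannot be uploaded while
    # W&B runs in offline mode, so this raises MisconfigurationException.
    # WandbLogger(offline=True, log_model=True)

    # Without an explicit save_dir, the logger now falls back to the wandb run
    # directory once the experiment is created (previously this happened only
    # when log_model=True), so checkpoints end up inside the run directory.
    logger = WandbLogger(project='my-project')
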
diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py
index f16b12f69e221..a75a15ba9213b 100644
--- a/pytorch_lightning/trainer/properties.py
+++ b/pytorch_lightning/trainer/properties.py
@@ -11,10 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from abc import ABC
-from argparse import ArgumentParser, Namespace
 import inspect
 import os
+from abc import ABC
+from argparse import ArgumentParser, Namespace
 from typing import cast, List, Optional, Type, TypeVar, Union
 
 from pytorch_lightning.accelerators.accelerator import Accelerator
@@ -63,16 +63,10 @@ class TrainerProperties(ABC):
 
     @property
     def log_dir(self):
-        if self.checkpoint_callback is not None:
-            dirpath = self.checkpoint_callback.dirpath
-            dirpath = os.path.split(dirpath)[0]
-        elif self.logger is not None:
-            if isinstance(self.logger, TensorBoardLogger):
-                dirpath = self.logger.log_dir
-            else:
-                dirpath = self.logger.save_dir
+        if self.logger is None:
+            dirpath = self.default_root_dir
         else:
-            dirpath = self._default_root_dir
+            dirpath = getattr(self.logger, 'log_dir' if isinstance(self.logger, TensorBoardLogger) else 'save_dir')
 
         if self.accelerator_backend is not None:
             dirpath = self.accelerator_backend.broadcast(dirpath)
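For context, a minimal sketch (not part of the patch) of the resolution order implemented by the rewritten `log_dir` property; `resolve_log_dir` and its arguments are hypothetical stand-ins for the trainer attributes used above:

    from typing import Optional

    from pytorch_lightning.loggers import TensorBoardLogger


    def resolve_log_dir(logger, default_root_dir: str) -> Optional[str]:
        # The ModelCheckpoint dirpath no longer participates in the resolution.
        if logger is None:
            return default_root_dir
        # TensorBoardLogger exposes a versioned log_dir; any other logger
        # contributes its save_dir, which may be None (e.g. offline Neptune).
        return logger.log_dir if isinstance(logger, TensorBoardLogger) else logger.save_dir

With the default TensorBoardLogger this yields `<default_root_dir>/lightning_logs/version_0`, with `logger=False` it yields `default_root_dir` itself, and with a third-party logger it follows `logger.save_dir`, matching the assertions added to the tests below.
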
""" - with pytest.raises(MisconfigurationException): + with pytest.raises(MisconfigurationException, match='requires either api_key or save_dir'): comet.config.get_api_key.return_value = None CometLogger(workspace='dummy-test', project_name='general') @@ -89,13 +89,10 @@ def test_comet_logger_experiment_name(comet): # Test api_key given with patch('pytorch_lightning.loggers.comet.CometExperiment') as comet_experiment: logger = CometLogger(api_key=api_key, experiment_name=experiment_name,) - assert logger._experiment is None _ = logger.experiment - comet_experiment.assert_called_once_with(api_key=api_key, project_name=None) - comet_experiment().set_name.assert_called_once_with(experiment_name) @@ -118,13 +115,10 @@ def save_os_environ(*args, **kwargs): with patch.dict(os.environ, {"COMET_EXPERIMENT_KEY": experiment_key}): with patch('pytorch_lightning.loggers.comet.CometExperiment', side_effect=save_os_environ) as comet_experiment: logger = CometLogger(api_key=api_key) - assert logger.version == experiment_key - assert logger._experiment is None _ = logger.experiment - comet_experiment.assert_called_once_with(api_key=api_key, project_name=None) assert instantation_environ["COMET_EXPERIMENT_KEY"] == experiment_key @@ -156,10 +150,12 @@ def test_comet_logger_dirs_creation(comet, comet_experiment, tmpdir, monkeypatch model = EvalModelTemplate() trainer = Trainer(default_root_dir=tmpdir, logger=logger, max_epochs=1, limit_val_batches=3) + assert trainer.log_dir == logger.save_dir trainer.fit(model) assert trainer.checkpoint_callback.dirpath == (tmpdir / 'test' / "1" / 'checkpoints') assert set(os.listdir(trainer.checkpoint_callback.dirpath)) == {'epoch=0-step=9.ckpt'} + assert trainer.log_dir == logger.save_dir @patch('pytorch_lightning.loggers.comet.comet_ml') @@ -170,11 +166,8 @@ def test_comet_name_default(comet): with patch('pytorch_lightning.loggers.comet.CometExperiment'): logger = CometLogger(api_key=api_key) - assert logger._experiment is None - assert logger.name == "comet-default" - assert logger._experiment is None @@ -187,11 +180,8 @@ def test_comet_name_project_name(comet): with patch('pytorch_lightning.loggers.comet.CometExperiment'): logger = CometLogger(api_key=api_key, project_name=project_name) - assert logger._experiment is None - assert logger.name == project_name - assert logger._experiment is None @@ -205,14 +195,11 @@ def test_comet_version_without_experiment(comet): with patch('pytorch_lightning.loggers.comet.CometExperiment'): logger = CometLogger(api_key=api_key, experiment_name=experiment_name) - assert logger._experiment is None first_version = logger.version assert first_version is not None - assert logger.version == first_version - assert logger._experiment is None _ = logger.experiment diff --git a/tests/loggers/test_mlflow.py b/tests/loggers/test_mlflow.py index c6072afbb69e2..c0f99220ed00b 100644 --- a/tests/loggers/test_mlflow.py +++ b/tests/loggers/test_mlflow.py @@ -13,11 +13,10 @@ # limitations under the License. 
 import importlib.util
 import os
-
 from unittest import mock
 from unittest.mock import MagicMock
-import pytest
 
+import pytest
 from pytorch_lightning import Trainer
 from pytorch_lightning.loggers import _MLFLOW_AVAILABLE, MLFlowLogger
 
@@ -113,9 +112,11 @@ def test_mlflow_log_dir(client, mlflow, tmpdir):
         limit_train_batches=1,
         limit_val_batches=3,
     )
+    assert trainer.log_dir == logger.save_dir
     trainer.fit(model)
     assert trainer.checkpoint_callback.dirpath == (tmpdir / "exp-id" / "run-id" / 'checkpoints')
     assert set(os.listdir(trainer.checkpoint_callback.dirpath)) == {'epoch=0-step=0.ckpt'}
+    assert trainer.log_dir == logger.save_dir
 
 
 def test_mlflow_logger_dirs_creation(tmpdir):
diff --git a/tests/loggers/test_neptune.py b/tests/loggers/test_neptune.py
index 661baf810cba7..d8b8ee7aafacf 100644
--- a/tests/loggers/test_neptune.py
+++ b/tests/loggers/test_neptune.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
 
 import torch
 
@@ -114,7 +114,9 @@ def _run_training(logger):
             limit_train_batches=0.05,
             logger=logger,
         )
+        assert trainer.log_dir is None
         trainer.fit(model)
+        assert trainer.log_dir is None
         return logger
 
     logger_close_after_fit = _run_training(NeptuneLogger(offline_mode=True))
diff --git a/tests/loggers/test_tensorboard.py b/tests/loggers/test_tensorboard.py
index 88b4069676330..1e82f9c7718c7 100644
--- a/tests/loggers/test_tensorboard.py
+++ b/tests/loggers/test_tensorboard.py
@@ -35,9 +35,11 @@ def test_tensorboard_hparams_reload(tmpdir):
     model = EvalModelTemplate()
 
     trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
+    assert trainer.log_dir == trainer.logger.log_dir
     trainer.fit(model)
 
-    folder_path = trainer.logger.log_dir
+    assert trainer.log_dir == trainer.logger.log_dir
+    folder_path = trainer.log_dir
 
     # make sure yaml is there
     with open(os.path.join(folder_path, "hparams.yaml")) as file:
diff --git a/tests/loggers/test_wandb.py b/tests/loggers/test_wandb.py
index a44b19ca39270..c297633e9a516 100644
--- a/tests/loggers/test_wandb.py
+++ b/tests/loggers/test_wandb.py
@@ -13,13 +13,16 @@
 # limitations under the License.
 import os
 import pickle
-from unittest import mock
-from argparse import ArgumentParser
 import types
+from argparse import ArgumentParser
+from unittest import mock
+
+import pytest
 
 from pytorch_lightning import Trainer
 from pytorch_lightning.loggers import WandbLogger
-from tests.base import EvalModelTemplate, BoringModel
+from pytorch_lightning.utilities.exceptions import MisconfigurationException
+from tests.base import BoringModel, EvalModelTemplate
 
 
 def get_warnings(recwarn):
@@ -94,6 +97,7 @@ class Experiment:
         """ """
         id = 'the_id'
         step = 0
+        dir = 'wandb'
 
         def project_name(self):
             return 'the_project_name'
@@ -109,6 +113,7 @@ def project_name(self):
     )
     # Access the experiment to ensure it's created
     assert trainer.logger.experiment, 'missing experiment'
+    assert trainer.log_dir == logger.save_dir
 
     pkl_bytes = pickle.dumps(trainer)
     trainer2 = pickle.loads(pkl_bytes)
@@ -147,11 +152,13 @@ def test_wandb_logger_dirs_creation(wandb, tmpdir):
 
     version = logger.version
     model = EvalModelTemplate()
-    trainer = Trainer(default_root_dir=tmpdir, logger=logger, max_epochs=1, limit_val_batches=3)
+    trainer = Trainer(default_root_dir=tmpdir, logger=logger, max_epochs=1, limit_val_batches=3, log_every_n_steps=1)
+    assert trainer.log_dir == logger.save_dir
     trainer.fit(model)
 
     assert trainer.checkpoint_callback.dirpath == str(tmpdir / 'project' / version / 'checkpoints')
     assert set(os.listdir(trainer.checkpoint_callback.dirpath)) == {'epoch=0-step=9.ckpt'}
+    assert trainer.log_dir == logger.save_dir
 
 
 def test_wandb_sanitize_callable_params(tmpdir):
@@ -182,3 +189,10 @@ def wrapper_something():
     assert params["something"] == "something"
     assert params["wrapper_something"] == "wrapper_something"
     assert params["wrapper_something_wo_name"] == ""
+
+
+@mock.patch('pytorch_lightning.loggers.wandb.wandb')
+def test_wandb_logger_offline_log_model(wandb, tmpdir):
+    """ Test that log_model=True raises an error in offline mode """
+    with pytest.raises(MisconfigurationException, match='checkpoints cannot be uploaded in offline mode'):
+        logger = WandbLogger(save_dir=str(tmpdir), offline=True, log_model=True)
diff --git a/tests/trainer/properties/log_dir.py b/tests/trainer/properties/log_dir.py
index 021bb04a7c917..2bc5c06955b69 100644
--- a/tests/trainer/properties/log_dir.py
+++ b/tests/trainer/properties/log_dir.py
@@ -12,114 +12,135 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import torch
+
 import pytest
-from tests.base.boring_model import BoringModel, RandomDataset
+import torch
+
 from pytorch_lightning import Trainer
+from pytorch_lightning.callbacks import ModelCheckpoint
+from pytorch_lightning.loggers import TensorBoardLogger
 from pytorch_lightning.utilities import APEX_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
+from tests.base.boring_model import BoringModel, RandomDataset
+
+
+class TestModel(BoringModel):
+    def __init__(self, expected_log_dir):
+        super().__init__()
+        self.expected_log_dir = expected_log_dir
+
+    def training_step(self, *args, **kwargs):
+        assert self.trainer.log_dir == self.expected_log_dir
+        return super().training_step(*args, **kwargs)
 
 
 def test_logdir(tmpdir):
     """
     Tests that the path is correct when checkpoint and loggers are used
     """
-    class TestModel(BoringModel):
-        def training_step(self, batch, batch_idx):
-            output = self.layer(batch)
-            loss = self.loss(batch, output)
+    expected = os.path.join(tmpdir, 'lightning_logs', 'version_0')
 
-            expected = os.path.join(self.trainer.default_root_dir, 'lightning_logs', 'version_0')
-            assert self.trainer.log_dir == expected
-            return {"loss": loss}
+    model = TestModel(expected)
 
-    model = TestModel()
-
-    limit_train_batches = 2
     trainer = Trainer(
         default_root_dir=tmpdir,
-        limit_train_batches=limit_train_batches,
-        limit_val_batches=2,
-        max_epochs=1,
+        max_steps=2,
+        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
     )
 
+    assert trainer.log_dir == expected
     trainer.fit(model)
+    assert trainer.log_dir == expected
 
 
 def test_logdir_no_checkpoint_cb(tmpdir):
     """
     Tests that the path is correct with no checkpoint
     """
-    class TestModel(BoringModel):
-        def training_step(self, batch, batch_idx):
-            output = self.layer(batch)
-            loss = self.loss(batch, output)
-            expected = os.path.join(self.trainer.default_root_dir, 'lightning_logs', 'version_0')
-            assert self.trainer.log_dir == expected
-            return {"loss": loss}
-
-    model = TestModel()
+    expected = os.path.join(tmpdir, 'lightning_logs', 'version_0')
+    model = TestModel(expected)
 
-    limit_train_batches = 2
     trainer = Trainer(
         default_root_dir=tmpdir,
-        limit_train_batches=limit_train_batches,
-        limit_val_batches=2,
-        max_epochs=1,
+        max_steps=2,
         checkpoint_callback=False
     )
 
+    assert trainer.log_dir == expected
     trainer.fit(model)
+    assert trainer.log_dir == expected
 
 
 def test_logdir_no_logger(tmpdir):
     """
     Tests that the path is correct even when there is no logger
     """
-    class TestModel(BoringModel):
-        def training_step(self, batch, batch_idx):
-            output = self.layer(batch)
-            loss = self.loss(batch, output)
-            expected = os.path.join(self.trainer.default_root_dir)
-            assert self.trainer.log_dir == expected
-            return {"loss": loss}
+    expected = os.path.join(tmpdir)
+    model = TestModel(expected)
 
-    model = TestModel()
-
-    limit_train_batches = 2
     trainer = Trainer(
         default_root_dir=tmpdir,
-        limit_train_batches=limit_train_batches,
-        limit_val_batches=2,
-        max_epochs=1,
+        max_steps=2,
         logger=False,
+        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
     )
 
+    assert trainer.log_dir == expected
     trainer.fit(model)
+    assert trainer.log_dir == expected
 
 
 def test_logdir_no_logger_no_checkpoint(tmpdir):
     """
     Tests that the path is correct even when there is no logger
     """
-    class TestModel(BoringModel):
-        def training_step(self, batch, batch_idx):
-            output = self.layer(batch)
-            loss = self.loss(batch, output)
-            expected = os.path.join(self.trainer.default_root_dir)
-            assert self.trainer.log_dir == expected
-            return {"loss": loss}
-
-    model = TestModel()
+    expected = os.path.join(tmpdir)
+    model = TestModel(expected)
 
-    limit_train_batches = 2
     trainer = Trainer(
         default_root_dir=tmpdir,
-        limit_train_batches=limit_train_batches,
-        limit_val_batches=2,
-        max_epochs=1,
+        max_steps=2,
         logger=False,
         checkpoint_callback=False
     )
+    assert trainer.log_dir == expected
+    trainer.fit(model)
+    assert trainer.log_dir == expected
+
+
+def test_logdir_custom_callback(tmpdir):
+    """
+    Tests that the path is correct even when there is a custom callback
+    """
+    expected = os.path.join(tmpdir, 'lightning_logs', 'version_0')
+    model = TestModel(expected)
+
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_steps=2,
+        callbacks=[ModelCheckpoint(dirpath=os.path.join(tmpdir, 'ckpts'))],
+    )
+
+    assert trainer.log_dir == expected
+    trainer.fit(model)
+    assert trainer.log_dir == expected
+
+
+def test_logdir_custom_logger(tmpdir):
+    """
+    Tests that the path is correct even when there is a custom logger
+    """
+    expected = os.path.join(tmpdir, 'custom_logs', 'version_0')
+    model = TestModel(expected)
+
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_steps=2,
+        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
+        logger=TensorBoardLogger(save_dir=tmpdir, name='custom_logs')
+    )
+
+    assert trainer.log_dir == expected
     trainer.fit(model)
+    assert trainer.log_dir == expected