
deprecate flush_logs_every_n_steps on Trainer (#9366)
edward-io authored Sep 14, 2021
1 parent ec828b8 commit c784092
Showing 7 changed files with 47 additions and 8 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -215,6 +215,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Deprecated passing `process_position` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `process_position` directly to the list of callbacks ([#9222](https://github.com/PyTorchLightning/pytorch-lightning/pull/9222))


+- Deprecated passing `flush_logs_every_n_steps` as a Trainer argument; pass it to the logger init instead, if supported ([#9366](https://github.com/PyTorchLightning/pytorch-lightning/pull/9366))


 ### Removed
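For reference, the migration this entry asks for looks like the following sketch (the logger choice is illustrative; any logger whose constructor accepts `flush_logs_every_n_steps`, such as the `CSVLogger` changed below, works):

    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import CSVLogger

    # Before (deprecated in v1.5, removal planned for v1.7):
    trainer = Trainer(flush_logs_every_n_steps=100)

    # After: configure flushing on the logger itself:
    trainer = Trainer(logger=CSVLogger("logs", flush_logs_every_n_steps=100))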
7 changes: 6 additions & 1 deletion pytorch_lightning/loggers/csv_logs.py
@@ -118,6 +118,7 @@ class CSVLogger(LightningLoggerBase):
         version: Experiment version. If version is not specified the logger inspects the save
             directory for existing versions, then automatically assigns the next available version.
         prefix: A string to put at the beginning of metric keys.
+        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
     """

     LOGGER_JOIN_CHAR = "-"

@@ -128,13 +129,15 @@ def __init__(
         name: Optional[str] = "default",
         version: Optional[Union[int, str]] = None,
         prefix: str = "",
+        flush_logs_every_n_steps: int = 100,
     ):
         super().__init__()
         self._save_dir = save_dir
         self._name = name or ""
         self._version = version
         self._prefix = prefix
         self._experiment = None
+        self._flush_logs_every_n_steps = flush_logs_every_n_steps

     @property
     def root_dir(self) -> str:

@@ -154,7 +157,7 @@ def log_dir(self) -> str:
         By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for the
         constructor's version parameter instead of ``None`` or an int.
         """
-        # create a pseudo standard path ala test-tube
+        # create a pseudo standard path
         version = self.version if isinstance(self.version, str) else f"version_{self.version}"
         log_dir = os.path.join(self.root_dir, version)
         return log_dir

@@ -197,6 +200,8 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
     def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
         metrics = self._add_prefix(metrics)
         self.experiment.log_metrics(metrics, step)
+        if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
+            self.save()

     @rank_zero_only
     def save(self) -> None:
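A minimal usage sketch of the new `CSVLogger` argument, following the `(step + 1) % flush_logs_every_n_steps` check added above (path, experiment name, and metric names are illustrative):

    from pytorch_lightning.loggers import CSVLogger

    logger = CSVLogger("logs", name="my_exp", flush_logs_every_n_steps=2)
    logger.log_metrics({"loss": 0.5}, step=0)  # buffered in memory only
    logger.log_metrics({"loss": 0.4}, step=1)  # (1 + 1) % 2 == 0, so save() flushes to disk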
12 changes: 7 additions & 5 deletions pytorch_lightning/loggers/tensorboard.py
@@ -64,17 +64,19 @@ class TensorBoardLogger(LightningLoggerBase):
             directory for existing versions, then automatically assigns the next available version.
             If it is a string then it is used as the run-specific subdirectory name,
             otherwise ``'version_${version}'`` is used.
-        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
-            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None``, in which
-            case logs are saved in ``/save_dir/version/``.
         log_graph: Adds the computational graph to tensorboard. This requires that
             the user has defined the `self.example_input_array` attribute in their
             model.
         default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
             called without a metric (otherwise calls to log_hyperparams without a metric are ignored).
         prefix: A string to put at the beginning of metric keys.
-        \**kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by
-            :class:`SummaryWriter` can be passed as keyword arguments in this logger.
+        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
+            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None``, in which
+            case logs are saved in ``/save_dir/version/``.
+        \**kwargs: Additional arguments used by :class:`SummaryWriter` can be passed as keyword
+            arguments in this logger. To automatically flush to disk, `max_queue` sets the size
+            of the queue for pending logs before flushing. `flush_secs` determines how many seconds
+            elapse before flushing.
     """

     NAME_HPARAMS_FILE = "hparams.yaml"
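Since `TensorBoardLogger` delegates writing to `torch.utils.tensorboard.SummaryWriter`, flushing is tuned through the writer's own kwargs rather than a logger-level argument. A sketch using the two options named in the docstring (the values are illustrative; the SummaryWriter defaults are `max_queue=10` and `flush_secs=120`):

    from pytorch_lightning.loggers import TensorBoardLogger

    logger = TensorBoardLogger(
        "tb_logs",
        max_queue=50,   # pending events buffered before a forced flush
        flush_secs=30,  # seconds between periodic flushes
    )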
10 changes: 9 additions & 1 deletion pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -23,6 +23,7 @@
 from pytorch_lightning.utilities import DeviceType, memory
 from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
 from pytorch_lightning.utilities.metrics import metrics_to_scalars
+from pytorch_lightning.utilities.warnings import rank_zero_deprecation


 class LoggerConnector:

@@ -44,11 +45,18 @@ def __init__(self, trainer: "pl.Trainer", log_gpu_memory: Optional[str] = None)
     def on_trainer_init(
         self,
         logger: Union[bool, LightningLoggerBase, Iterable[LightningLoggerBase]],
-        flush_logs_every_n_steps: int,
+        flush_logs_every_n_steps: Optional[int],
         log_every_n_steps: int,
         move_metrics_to_cpu: bool,
     ) -> None:
         self.configure_logger(logger)
+        if flush_logs_every_n_steps is not None:
+            rank_zero_deprecation(
+                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
+                "and will be removed in v1.7. Please configure flushing in the logger instead."
+            )
+        else:
+            flush_logs_every_n_steps = 100  # original default parameter
+        self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
         self.trainer.log_every_n_steps = log_every_n_steps
         self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
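Both paths through `on_trainer_init` can be exercised directly; a sketch under the assumption that `rank_zero_deprecation` emits a standard Python warning (the asserted values follow from the diff above):

    import warnings

    from pytorch_lightning import Trainer

    # Deprecated path: warns, but the explicit value is still honoured.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        trainer = Trainer(flush_logs_every_n_steps=10)
    assert trainer.flush_logs_every_n_steps == 10
    assert any("deprecated in v1.5" in str(w.message) for w in caught)

    # New default path: no warning, falls back to the original default of 100.
    trainer = Trainer()
    assert trainer.flush_logs_every_n_steps == 100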
6 changes: 5 additions & 1 deletion pytorch_lightning/trainer/trainer.py
@@ -130,7 +130,7 @@ def __init__(
         limit_test_batches: Union[int, float] = 1.0,
         limit_predict_batches: Union[int, float] = 1.0,
         val_check_interval: Union[int, float] = 1.0,
-        flush_logs_every_n_steps: int = 100,
+        flush_logs_every_n_steps: Optional[int] = None,
         log_every_n_steps: int = 50,
         accelerator: Optional[Union[str, Accelerator]] = None,
         sync_batchnorm: bool = False,

@@ -213,6 +213,10 @@ def __init__(
             flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
+
+                .. deprecated:: v1.5
+                    ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
+                    Please configure flushing directly in the logger instead.
             gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node
             gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=0`` disables gradient
5 changes: 5 additions & 0 deletions tests/deprecated_api/test_remove_1-7.py
@@ -196,6 +196,11 @@ def test_v1_7_0_process_position_trainer_constructor(tmpdir):
     _ = Trainer(process_position=5)


+def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):
+    with pytest.deprecated_call(match=r"Setting `Trainer\(flush_logs_every_n_steps=10\)` is deprecated in v1.5"):
+        _ = Trainer(flush_logs_every_n_steps=10)
+
+
 class BoringCallbackDDPSpawnModel(BoringModel):
     def __init__(self):
         super().__init__()
12 changes: 12 additions & 0 deletions tests/loggers/test_csv.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 import os
 from argparse import Namespace
+from unittest.mock import MagicMock

 import pytest
 import torch

@@ -103,3 +104,14 @@ def test_file_logger_log_hyperparams(tmpdir):
     path_yaml = os.path.join(logger.log_dir, ExperimentWriter.NAME_HPARAMS_FILE)
     params = load_hparams_from_yaml(path_yaml)
     assert all(n in params for n in hparams)
+
+
+def test_flush_n_steps(tmpdir):
+    logger = CSVLogger(tmpdir, flush_logs_every_n_steps=2)
+    metrics = {"float": 0.3, "int": 1, "FloatTensor": torch.tensor(0.1), "IntTensor": torch.tensor(1)}
+    logger.save = MagicMock()
+    logger.log_metrics(metrics, step=0)
+
+    logger.save.assert_not_called()
+    logger.log_metrics(metrics, step=1)
+    logger.save.assert_called_once()
