diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38d97c984c6da..6eed2a9a1388a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -103,7 +103,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Changed the default of `find_unused_parameters` to `False` in DDP ([#5185](https://github.com/PyTorchLightning/pytorch-lightning/pull/5185))
 
 
-- Changed `ModelCheckpoint` version suffixes to start at 1 ([5008](https://github.com/PyTorchLightning/pytorch-lightning/pull/5008))
+- Changed `ModelCheckpoint` version suffixes to start at 1 ([#5008](https://github.com/PyTorchLightning/pytorch-lightning/pull/5008))
+
+
+- Progress bar metrics tensors are now converted to float ([#5692](https://github.com/PyTorchLightning/pytorch-lightning/pull/5692))
 
 
 - Changed the default value for the `progress_bar_refresh_rate` Trainer argument in Google COLAB notebooks to 20 ([#5516](https://github.com/PyTorchLightning/pytorch-lightning/pull/5516))
diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 87caec7248208..439e9046726ce 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -14,7 +14,7 @@
 import os
 from copy import deepcopy
 from pprint import pprint
-from typing import Any, Dict, Iterable, Union
+from typing import Dict, Iterable, Union
 
 import torch
 
@@ -37,7 +37,7 @@ def __init__(self, trainer):
         self._callback_metrics = MetricsHolder()
         self._evaluation_callback_metrics = MetricsHolder(to_float=True)
         self._logged_metrics = MetricsHolder()
-        self._progress_bar_metrics = MetricsHolder()
+        self._progress_bar_metrics = MetricsHolder(to_float=True)
         self.eval_loop_results = []
         self._cached_results = {stage: EpochResultStore(trainer, stage) for stage in RunningStage}
         self._cached_results[None] = EpochResultStore(trainer, None)
@@ -88,7 +88,7 @@ def get_metrics(self, key: str) -> Dict:
         )
         return metrics_holder.metrics
 
-    def set_metrics(self, key: str, val: Any) -> None:
+    def set_metrics(self, key: str, val: Dict) -> None:
         metrics_holder = getattr(self, f"_{key}", None)
         metrics_holder.reset(val)
 
diff --git a/tests/callbacks/test_progress_bar.py b/tests/callbacks/test_progress_bar.py
index d8cf23c1105f0..5f861d7a2cce9 100644
--- a/tests/callbacks/test_progress_bar.py
+++ b/tests/callbacks/test_progress_bar.py
@@ -16,6 +16,7 @@
 from unittest.mock import call, Mock
 
 import pytest
+import torch
 
 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint, ProgressBar, ProgressBarBase
@@ -349,3 +350,27 @@ def test_test_progress_bar_update_amount(tmpdir, test_batches, refresh_rate, tes
     )
     trainer.test(model)
     progress_bar.test_progress_bar.update.assert_has_calls([call(delta) for delta in test_deltas])
+
+
+def test_tensor_to_float_conversion(tmpdir):
+    """Check tensor gets converted to float"""
+
+    class TestModel(BoringModel):
+
+        def training_step(self, batch, batch_idx):
+            self.log('foo', torch.tensor(0.123), prog_bar=True)
+            self.log('bar', {"baz": torch.tensor([1])}, prog_bar=True)
+            return super().training_step(batch, batch_idx)
+
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_train_batches=2,
+        logger=False,
+        checkpoint_callback=False,
+    )
+    trainer.fit(TestModel())
+
+    pbar = trainer.progress_bar_callback.main_progress_bar
+    actual = str(pbar.postfix)
+    assert actual.endswith("foo=0.123, bar={'baz': tensor([1])}")