From 4a9aa48dc048f19f075c21ff81248a5c8fe68321 Mon Sep 17 00:00:00 2001
From: edenlightning <66261195+edenlightning@users.noreply.github.com>
Date: Wed, 24 Feb 2021 15:13:48 -0500
Subject: [PATCH] Update gpu warning (#6181)

Co-authored-by: Jirka Borovec
Co-authored-by: Kaushik Bokka
---
 .../trainer/connectors/accelerator_connector.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index 7021081d6cc90..5066dcf9e524a 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -544,7 +544,10 @@ def set_distributed_mode(self, distributed_backend: Optional[str] = None):
             rank_zero_info(f'TPU available: {_TPU_AVAILABLE}, using: {num_cores} TPU cores')
 
         if torch.cuda.is_available() and self._device_type != DeviceType.GPU:
-            rank_zero_warn("GPU available but not used. Set the --gpus flag when calling the script.")
+            rank_zero_warn(
+                "GPU available but not used. Set the gpus flag in your trainer"
+                " `Trainer(gpus=1)` or script `--gpus=1`."
+            )
 
     def _set_horovod_backend(self):
         self.check_horovod()
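
Note: below is a minimal sketch of the two usages the updated warning message points to, assuming a PyTorch Lightning version of this era where Trainer still accepts the gpus argument and provides add_argparse_args/from_argparse_args. The module my_project and the LightningModule subclass MyModel are hypothetical, used only for illustration.

    # Sketch of the two ways to set the gpus flag named by the new warning.
    from argparse import ArgumentParser

    from pytorch_lightning import Trainer

    from my_project import MyModel  # hypothetical LightningModule

    def main():
        # Script style: invoke as `python train.py --gpus=1`.
        # add_argparse_args registers every Trainer flag, including --gpus.
        parser = ArgumentParser()
        parser = Trainer.add_argparse_args(parser)
        args = parser.parse_args()
        trainer = Trainer.from_argparse_args(args)

        # Trainer style (equivalent, no CLI): trainer = Trainer(gpus=1)
        # Leaving gpus unset keeps self._device_type off DeviceType.GPU, so
        # when CUDA is available set_distributed_mode() emits the warning
        # changed in this patch.

        trainer.fit(MyModel())

    if __name__ == "__main__":
        main()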