diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e889029814c8..97c5f13775dcc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -170,7 +170,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
-- Set better defaults for `rank_zero_only.rank` when training is launched with SLURM and torchelastic ([#6802](https://github.com/PyTorchLightning/pytorch-lightning/pull/6802/))
+- Set better defaults for `rank_zero_only.rank` when training is launched with SLURM and torchelastic:
+    * Support SLURM and torchelastic global rank environment variables ([#5715](https://github.com/PyTorchLightning/pytorch-lightning/pull/5715))
+    * Remove hardcoding of local rank in accelerator connector ([#6878](https://github.com/PyTorchLightning/pytorch-lightning/pull/6878))
 
 - Sanitize `None` params during pruning ([#6836](https://github.com/PyTorchLightning/pytorch-lightning/pull/6836))
 
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index c53db011d837a..e49d524ec48e9 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -53,7 +53,6 @@
     device_parser,
     DeviceType,
     DistributedType,
-    rank_zero_only,
 )
 from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -106,12 +105,6 @@ def __init__(
         self._training_type_plugin: Optional[TrainingTypePlugin] = None
         self._cluster_environment: Optional[ClusterEnvironment] = None
 
-        # init the default rank if exists
-        # we need to call this here or NVIDIA flags and other messaging in init will show on all ranks
-        # this way we only show it on rank 0
-        if "LOCAL_RANK" in os.environ:
-            rank_zero_only.rank = int(os.environ["LOCAL_RANK"])
-
         # for gpus allow int, string and gpu list
         if auto_select_gpus and isinstance(gpus, int):
            self.gpus = pick_multiple_gpus(gpus)
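
For context, the `LOCAL_RANK`-only default removed above is superseded by rank resolution that reads the launcher's own environment variables (per the #5715 / #6878 changelog entries). Below is a minimal sketch of that idea; the helper name `_infer_global_rank_default` and the exact variable precedence are illustrative assumptions, not the code landed by those PRs, where the real resolution lives in Lightning's cluster environment handling and is not shown in this diff:

```python
import os


def _infer_global_rank_default() -> int:
    """Best-effort global rank from common launcher environment variables.

    Hypothetical sketch: check SLURM first (SLURM_PROCID), then torchelastic
    (RANK / GROUP_RANK), then LOCAL_RANK, and fall back to rank 0.
    """
    for env_var in ("SLURM_PROCID", "RANK", "GROUP_RANK", "LOCAL_RANK"):
        value = os.environ.get(env_var)
        if value is not None:
            return int(value)
    return 0


# Usage sketch: seed rank_zero_only.rank with this default instead of
# hard-coding LOCAL_RANK in the accelerator connector.
# rank_zero_only.rank = _infer_global_rank_default()
```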