From 7c00c4cc41ebf57f0e639d4f71b48e2d5f6c4543 Mon Sep 17 00:00:00 2001
From: William Falcon
Date: Fri, 2 Oct 2020 06:22:51 -0400
Subject: [PATCH] ref: part 4 of #3733

---
 pytorch_lightning/accelerators/ddp_backend.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
index b144648e4754d..11311bfac21b5 100644
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -200,7 +200,7 @@ def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0
 
         # MODEL
         # copy model to each gpu
-        self.model_to_device(model, process_idx, is_master)
+        self.model_to_device(model, process_idx)
 
         # CHOOSE OPTIMIZER
         # allow for lr schedulers as well
@@ -260,11 +260,9 @@ def set_world_ranks(self, process_idx):
         self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
         self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
 
-    def model_to_device(self, model, process_idx, is_master):
+    def model_to_device(self, model, process_idx):
         gpu_idx = int(os.environ.get('PL_DDP_PID', process_idx))
-        gpu_idx = int(os.environ.get('PL_DDP_PID', gpu_idx))
-
         self.trainer.root_gpu = gpu_idx
         torch.cuda.set_device(self.trainer.root_gpu)
         model.cuda(self.trainer.root_gpu)
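
Note: for context, a minimal standalone sketch of the device-binding logic this
patch leaves behind. The `standalone_model_to_device` name and the bare
`trainer` stand-in (anything carrying a `root_gpu` attribute) are illustrative
assumptions, not part of the patch:

    import os
    import torch

    def standalone_model_to_device(trainer, model, process_idx):
        # PL_DDP_PID, when exported by the DDP launcher, overrides the spawned
        # process index so each child binds to the GPU it was assigned;
        # otherwise the process index itself picks the GPU.
        gpu_idx = int(os.environ.get('PL_DDP_PID', process_idx))

        trainer.root_gpu = gpu_idx
        torch.cuda.set_device(trainer.root_gpu)  # default CUDA device for this process
        model.cuda(trainer.root_gpu)             # move parameters/buffers onto that GPU

Since PL_DDP_PID now resolves the device index directly from process_idx, the
second `os.environ.get('PL_DDP_PID', gpu_idx)` lookup and the `is_master`
parameter appear to be redundant, which is what this patch removes.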