Skip to content

Commit

Permalink
ref: part 4 of #3733
Browse files Browse the repository at this point in the history
  • Loading branch information
williamFalcon committed Oct 2, 2020
1 parent 434a328 commit 7c00c4c
Showing 1 changed file with 2 additions and 4 deletions.
6 changes: 2 additions & 4 deletions pytorch_lightning/accelerators/ddp_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0

# MODEL
# copy model to each gpu
self.model_to_device(model, process_idx, is_master)
self.model_to_device(model, process_idx)

# CHOOSE OPTIMIZER
# allow for lr schedulers as well
Expand Down Expand Up @@ -260,11 +260,9 @@ def set_world_ranks(self, process_idx):
self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes

def model_to_device(self, model, process_idx):
    """Place *model* on the GPU this DDP process owns and record it on the trainer.

    The GPU index is taken from the ``PL_DDP_PID`` environment variable when
    set (used when processes are spawned externally and the local process
    index does not match the device index); otherwise it falls back to
    ``process_idx``.

    Args:
        model: the LightningModule (or wrapped model) to move to the device.
        process_idx: local process index, used as the default GPU index.

    Side effects:
        Sets ``self.trainer.root_gpu``, makes that device current via
        ``torch.cuda.set_device``, and moves ``model`` onto it in place.
    """
    # PL_DDP_PID overrides the local process index when the launcher
    # assigns device ids explicitly — TODO confirm against the spawn path.
    gpu_idx = int(os.environ.get('PL_DDP_PID', process_idx))

    self.trainer.root_gpu = gpu_idx
    torch.cuda.set_device(self.trainer.root_gpu)
    model.cuda(self.trainer.root_gpu)
Expand Down

0 comments on commit 7c00c4c

Please sign in to comment.