ddp_tutorial.py
"""Minimal single-node DistributedDataParallel (DDP) example: each spawned
process wraps a small nn.Linear model in DDP and runs one forward/backward
step on its own GPU."""
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP

def example(rank, world_size):
    # create default process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    # create local model on this rank's GPU
    model = nn.Linear(10, 10).to(rank)
    # construct DDP model
    ddp_model = DDP(model, device_ids=[rank])
    # define loss function and optimizer
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
    # forward pass
    inputs = torch.randn(20, 10).to(rank)
    outputs = ddp_model(inputs)
    labels = torch.randn(20, 10).to(rank)
    loss = loss_fn(outputs, labels)
    print('loss: {}, rank: {}, torch.cuda.get_device_name: {}'.format(
        loss.item(), rank, torch.cuda.get_device_name(rank)))
    # backward pass (DDP synchronizes gradients across ranks here)
    loss.backward()
    # update parameters
    optimizer.step()


def main():
    world_size = 3
    mp.spawn(example,
             args=(world_size,),
             nprocs=world_size,
             join=True)
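

# A minimal alternative entry point, not part of the original tutorial: a sketch
# of how the same example could be driven by torchrun instead of mp.spawn.
# Assumption: the script would be launched as
#   torchrun --nproc_per_node=3 ddp_tutorial.py
# torchrun exports RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT itself, so no
# environment variables need to be set manually before calling example().
def main_torchrun():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    example(rank, world_size)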


if __name__ == "__main__":
    # Environment variables which need to be
    # set when using c10d's default "env"
    # initialization mode.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29500"
    main()