-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathcuda_try.py
34 lines (27 loc) · 886 Bytes
/
cuda_try.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import torch
import torch.nn as nn
import timeit

print("Beginning..")

# --- CUDA setup (timed) -------------------------------------------------
t0 = timeit.default_timer()
if torch.cuda.is_available():
    # Seed the CUDA RNG and force deterministic cuDNN algorithm selection
    # so repeated runs are reproducible.
    torch.cuda.manual_seed(2809)
    torch.backends.cudnn.deterministic = True
    ngpus = torch.cuda.device_count()
    # FIX: the original hard-coded 'cuda:1', which raises on machines with a
    # single GPU; only target device 1 when it actually exists.
    device = torch.device('cuda:1' if ngpus > 1 else 'cuda:0')
    print("Using {} GPU(s)...".format(ngpus))
print("Setup takes {:.2f}".format(timeit.default_timer() - t0))

# --- Model construction (timed) -----------------------------------------
t1 = timeit.default_timer()
# Tiny conv net: 3->6->1 channels, 3x3 kernels, stride 1, padding 1
# (spatial size is preserved).
model = nn.Sequential(
    nn.Conv2d(3, 6, 3, 1, 1),
    nn.ReLU(),
    nn.Conv2d(6, 1, 3, 1, 1)
)
print("Model init takes {:.2f}".format(timeit.default_timer() - t1))

# --- Host -> device transfer and sync (timed separately) -----------------
if torch.cuda.is_available():
    t2 = timeit.default_timer()
    model = model.to(device)
    print("Model to device takes {:.2f}".format(timeit.default_timer() - t2))
    t3 = timeit.default_timer()
    # FIX: synchronize() is guarded — calling it on a CPU-only host errors.
    # It blocks until all queued CUDA work finishes, so the timing reflects
    # completed (not merely launched) GPU work.
    torch.cuda.synchronize()
    print("Cuda Synch takes {:.2f}".format(timeit.default_timer() - t3))

print('done')