solver.py (forked from SarahwXU/HiSup)
import torch
import math


def make_optimizer(cfg, model):
    # Build one parameter group per trainable parameter so that learning rate and
    # weight decay can be adjusted per parameter later on.
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    if cfg.SOLVER.OPTIMIZER == 'SGD':
        optimizer = torch.optim.SGD(params,
                                    cfg.SOLVER.BASE_LR,
                                    momentum=cfg.SOLVER.MOMENTUM,
                                    weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif cfg.SOLVER.OPTIMIZER in ('ADAM', 'ADAMcos'):
        # 'ADAMcos' uses the same Adam optimizer; the cosine schedule is
        # selected in make_lr_scheduler below.
        optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR,
                                     weight_decay=cfg.SOLVER.WEIGHT_DECAY,
                                     amsgrad=cfg.SOLVER.AMSGRAD)
    elif cfg.SOLVER.OPTIMIZER == 'ADAMW':
        optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR,
                                      weight_decay=cfg.SOLVER.WEIGHT_DECAY,
                                      amsgrad=cfg.SOLVER.AMSGRAD)
    else:
        raise NotImplementedError()
    return optimizer
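

# Illustrative sketch, not part of the upstream HiSup code: because make_optimizer
# places every trainable parameter in its own param group (in named_parameters()
# order), individual groups can be retargeted after construction. The helper below
# is hypothetical and only shows one way to use that layout.
def scale_bias_lr(model, optimizer, factor=0.1):
    """Shrink the learning rate of every bias parameter's group by `factor`."""
    trainable = [name for name, p in model.named_parameters() if p.requires_grad]
    for name, group in zip(trainable, optimizer.param_groups):
        if name.endswith('bias'):
            group['lr'] *= factor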


def make_lr_scheduler(cfg, optimizer):
    if cfg.SOLVER.OPTIMIZER == 'ADAMcos':
        # Keep the learning rate constant for the first STATIC_STEP epochs, then
        # decay it with a half-cosine, clamped at a multiplicative floor of 1e-5.
        t = cfg.SOLVER.STATIC_STEP
        max_ep = cfg.SOLVER.MAX_EPOCH
        lambda1 = lambda epoch: 1 if epoch < t else 0.00001 \
            if 0.5 * (1 + math.cos(math.pi * (epoch - t) / (max_ep - t))) < 0.00001 \
            else 0.5 * (1 + math.cos(math.pi * (epoch - t) / (max_ep - t)))
        return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
    else:
        # Default: step decay at the epochs listed in cfg.SOLVER.STEPS.
        return torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                    milestones=cfg.SOLVER.STEPS,
                                                    gamma=cfg.SOLVER.GAMMA)
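

if __name__ == "__main__":
    # Minimal usage sketch, not part of the upstream file: drive both helpers with
    # a stand-in config and model. SimpleNamespace only mimics the attribute access
    # of the project's yacs-style config; the field values below are illustrative,
    # not the repository's defaults.
    from types import SimpleNamespace

    cfg = SimpleNamespace(SOLVER=SimpleNamespace(
        OPTIMIZER='ADAMcos',   # Adam plus cosine schedule after a static phase
        BASE_LR=1e-4,
        WEIGHT_DECAY=1e-4,
        AMSGRAD=False,
        STATIC_STEP=10,
        MAX_EPOCH=30,
        STEPS=[20, 25],        # only used by the MultiStepLR branch
        GAMMA=0.1,
    ))
    model = torch.nn.Linear(8, 2)

    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)

    # The learning rate stays at BASE_LR for the first STATIC_STEP epochs, then
    # follows a half-cosine decay, floored at 1e-5 of BASE_LR.
    for epoch in range(cfg.SOLVER.MAX_EPOCH):
        optimizer.zero_grad()
        model(torch.randn(4, 8)).sum().backward()
        optimizer.step()
        scheduler.step()
        print(epoch, optimizer.param_groups[0]['lr'])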