-
Notifications
You must be signed in to change notification settings - Fork 1
/
utils.py
78 lines (65 loc) · 1.98 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import random
import yaml
from easydict import EasyDict
import numpy as np
import torch
def load_config(config_path):
    """Parse the YAML file at *config_path* into an attribute-accessible dict.

    Uses ``yaml.safe_load`` (no arbitrary-object deserialization) and wraps
    the parsed mapping in an ``EasyDict`` so keys can be read as attributes.
    """
    with open(config_path, 'r') as f:
        raw = yaml.safe_load(f)
    return EasyDict(raw)
def seed_all(seed):
    """Seed every RNG source used by the project.

    Covers stdlib ``random``, NumPy, and torch (CPU and all CUDA devices).
    Passing ``None`` disables seeding entirely and returns immediately.
    """
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Kept disabled, as in the original: enabling trades throughput for
    # bit-exact cuDNN reproducibility.
    # torch.backends.cudnn.deterministic = True
def recursive_to_device(x, device):
    """Recursively move all tensors in a (possibly nested) container to *device*.

    Dicts, lists and tuples are rebuilt with their elements moved. Tuples now
    keep their tuple type — the previous implementation collapsed tuples into
    lists, which broke callers that unpack or ``isinstance``-check tuples.
    Any non-container, non-tensor object is returned unchanged.

    Args:
        x: a ``torch.Tensor``, dict, list, tuple, or arbitrary object.
        device: anything accepted by ``torch.Tensor.to`` (e.g. 'cpu', 'cuda').

    Returns:
        The same structure with every tensor moved to *device*.
    """
    if isinstance(x, torch.Tensor):
        return x.to(device)
    if isinstance(x, dict):
        return {k: recursive_to_device(v, device) for k, v in x.items()}
    if isinstance(x, tuple):
        # Preserve the container type instead of degrading to a list.
        return tuple(recursive_to_device(v, device) for v in x)
    if isinstance(x, list):
        return [recursive_to_device(v, device) for v in x]
    return x
def get_optimizer(cfg, model):
    """Build the optimizer described by *cfg* over *model*'s parameters.

    Expects ``cfg`` to expose ``type``, ``lr``, ``weight_decay``, ``beta1``
    and ``beta2``. Only ``cfg.type == 'adam'`` is recognized; any other
    value raises ``NotImplementedError``.
    """
    if cfg.type != 'adam':
        raise NotImplementedError(f'Optimizer not supported: {cfg.type}')
    return torch.optim.Adam(
        model.parameters(),
        lr=cfg.lr,
        weight_decay=cfg.weight_decay,
        betas=(cfg.beta1, cfg.beta2),
    )
def get_scheduler(cfg, optimizer):
    """Build the LR scheduler described by *cfg* on top of *optimizer*.

    Supported ``cfg.type`` values and the extra fields each one reads:
      - 'plateau':   factor, patience, min_lr
      - 'step':      step_size, gamma
      - 'multistep': milestones, gamma
      - 'exp':       gamma
    Anything else raises ``NotImplementedError``.
    """
    # Dispatch table; lambdas defer attribute access so each branch only
    # reads the cfg fields it actually needs.
    builders = {
        'plateau': lambda: torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            factor=cfg.factor,
            patience=cfg.patience,
            min_lr=cfg.min_lr,
        ),
        'step': lambda: torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size=cfg.step_size,
            gamma=cfg.gamma,
        ),
        'multistep': lambda: torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=cfg.milestones,
            gamma=cfg.gamma,
        ),
        'exp': lambda: torch.optim.lr_scheduler.ExponentialLR(
            optimizer,
            gamma=cfg.gamma,
        ),
    }
    if cfg.type not in builders:
        raise NotImplementedError(f'Scheduler not supported: {cfg.type}')
    return builders[cfg.type]()
def count_parameters(model, trainable_only=False):
    """Return the number of parameters in *model*.

    Args:
        model: any ``torch.nn.Module``.
        trainable_only: when True, count only parameters with
            ``requires_grad`` set. Defaults to False, matching the original
            behavior of counting every parameter.

    Returns:
        int: the total element count across the selected parameters.
    """
    params = model.parameters()
    if trainable_only:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)