# utilies.py
import copy
import os
import random

import numpy as np
import scipy.spatial  # scipy.spatial.distance is used in distance_metrics below
import torch
import torch.nn as nn
import torch.nn.functional as F

def import_class(name):
    """Resolve a dotted path such as 'package.module.Class' to the named object."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod
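
# A minimal usage sketch (the dotted path here is only an illustration):
#   conv_cls = import_class('torch.nn.Conv2d')
#   assert conv_cls is torch.nn.Conv2d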

speed_file = 'speed.txt'

def distance_metrics(gt, preds):
    """Euclidean displacement errors between ground truth and predictions.

    Returns the mean error over all time steps (MAD/ADE), the mean error at
    the final step (FAD/FDE), and the full per-step error array.
    """
    errors = np.zeros(preds.shape[:-1])
    for i in range(errors.shape[0]):
        for j in range(errors.shape[1]):
            errors[i, j] = scipy.spatial.distance.euclidean(gt[i, j], preds[i, j])
    return errors.mean(), errors[:, -1].mean(), errors
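
# Sketch of the expected shapes, (batch, time, coord), with made-up data:
#   gt = np.random.rand(8, 12, 2)      # ground-truth trajectories
#   preds = np.random.rand(8, 12, 2)   # predicted trajectories
#   mad, fad, per_step = distance_metrics(gt, preds)   # per_step has shape (8, 12)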

def write_data(data, file_name):
    """Dump a (batch, time, node, coord) array to a text file, one node per line."""
    with open(file_name, 'w') as writer:
        for b_idx, i in enumerate(data):
            for t_ind, j in enumerate(i):
                for v_ind, k in enumerate(j):
                    result = 'batch = {}, time = {}, node = {}, x = '.format(
                        b_idx, t_ind, v_ind) + ' '.join(k.astype(str)) + '\n'
                    writer.write(result)
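
# Note: `data` is assumed to be a NumPy array (the innermost `k.astype(str)`
# is a NumPy call), e.g. write_data(preds, speed_file) for the array above.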

def seed_torch(seed=0):
    """Seed Python, NumPy, and PyTorch RNGs for reproducible runs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
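
# Call seed_torch() once at startup, before building models or data loaders;
# cudnn.deterministic restricts cuDNN to deterministic kernels, which may be
# slower but makes runs repeatable.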

def subsequent_mask(size):
    """Mask out subsequent positions (causal attention mask).

    Returns a (1, size, size) boolean tensor that is True on and below the
    diagonal, so each position may attend only to itself and earlier positions.
    """
    attn_shape = (1, size, size)
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(mask) == 0
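
# For example, subsequent_mask(3) yields (ignoring the leading batch dim):
#   [[ True, False, False],
#    [ True,  True, False],
#    [ True,  True,  True]]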

class NoamOpt(object):
    """Optimizer wrapper implementing the Noam learning-rate schedule."""

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0
        # Bookkeeping for training loops: current epoch and best mean/final
        # displacement errors seen so far.
        self.epoch = 0
        self.best_mad = np.inf
        self.best_fad = np.inf

    def step(self):
        """Update the learning rate, then step the wrapped optimizer."""
        self._step += 1
        rate = self.rate()
        for p in self.optimizer.param_groups:
            p['lr'] = rate
        self._rate = rate
        self.optimizer.step()

    def rate(self, step=None):
        """Noam schedule: lr = factor * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)."""
        if step is None:
            step = self._step
        return self.factor * (self.model_size ** -0.5) * min(step ** -0.5, step * self.warmup ** -1.5)
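
    # The rate rises linearly for the first `warmup` steps and then decays as
    # step^-0.5; the two branches of min(...) intersect exactly at step == warmup.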

def get_std_opt(model):
    """Noam optimizer with standard hyperparameters (factor=2, warmup=4000).

    Assumes the model exposes `model.src_embed[0].d_model` for its embedding size.
    """
    return NoamOpt(model.src_embed[0].d_model, 2, 4000,
                   torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
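
# A minimal usage sketch with a stand-in model (names here are illustrative):
#   model = nn.Linear(64, 64)
#   opt = NoamOpt(model_size=64, factor=1, warmup=4000,
#                 optimizer=torch.optim.Adam(model.parameters(), lr=0))
#   loss = model(torch.randn(2, 64)).sum()
#   loss.backward()
#   opt.step()  # sets the scheduled lr, then steps the wrapped Adam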

def _get_clones(module, N):
    """Return a ModuleList of N independent deep copies of `module`."""
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

def _get_activation_fn(activation):
    """Map an activation name ('relu' or 'gelu') to its functional form."""
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu
    else:
        raise RuntimeError("activation should be relu/gelu, not %s." % activation)

def conv_init(conv):
    """Kaiming-initialize a conv layer's weight and zero its bias."""
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    if conv.bias is not None:  # guard against layers built with bias=False
        nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialize a BatchNorm layer's weight to `scale` and its bias to zero."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)
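
# Typical use when building a network (illustrative):
#   conv = nn.Conv2d(3, 64, kernel_size=3)
#   bn = nn.BatchNorm2d(64)
#   conv_init(conv)
#   bn_init(bn, 1)  # scale=1 is the standard choice; scale=0 zero-inits the branch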