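# runtime.py: benchmark of the robust loss from convex_adversarial on a small
# convolutional network. For each channel count j, the script times ten
# training iterations and records GPU memory usage, sweeping j upward until
# the GPU runs out of memory, then saves the results to text files.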
import torch
import torch.nn as nn
import torch.optim as optim
import gpustat
import numpy as np
from problems import Flatten
def gpu_mem():
    # Query gpustat and return the memory used (in MB) on the first GPU.
    stats = gpustat.GPUStatCollection.new_query()
    for gpu in stats:
        util = gpu.entry['memory.used']
        break
    return util
# Sample m random 28x28 points that are pairwise more than 2r apart in l1
# distance, so the l1 balls of radius r around them do not overlap.
m = 10
np.random.seed(3)
x = [np.random.uniform(size=(1, 28, 28))]
r = 0.16
while len(x) < m:
    p = np.random.uniform(size=(1, 28, 28))
    if min(np.abs(p - a).sum() for a in x) > 2 * r:
        x.append(p)
# r = 0.145
epsilon = r / 2
X = torch.Tensor(np.array(x)).cuda()
torch.manual_seed(1)
# Random binary labels in {0, 1}.
y = (torch.rand(m) + 0.5).long().cuda()
import sys
sys.path.append("../")
from convex_adversarial import robust_loss
import time
class Meter:
    # Collects values in groups; next() starts a new group and save()
    # writes all completed groups to a text file.
    def __init__(self):
        self.l = [[]]

    def add(self, x):
        self.l[-1].append(x)

    def next(self):
        self.l.append([])

    def save(self, fname):
        # The last group may be incomplete (e.g. after an OOM), so drop it.
        x = np.array(self.l[:-1])
        np.savetxt(fname, x)
xs, ys = Meter(), Meter()
mems = Meter()

# If True, use the approximate bound with random projections (l1_proj=50);
# otherwise compute the exact bound.
PROJ = True
# Sweep the number of conv channels j, timing ten training iterations at
# each size, until the GPU runs out of memory.
for j in range(1, 1001):
    try:
        for _ in range(10):
            torch.cuda.empty_cache()
        start_mem = gpu_mem()
        robust_net = nn.Sequential(
            nn.Conv2d(1, j, 3, stride=1, padding=1),
            nn.ReLU(),
            Flatten(),
            nn.Linear(j * 28 * 28, 2)
        ).cuda()
        data = []
        opt = optim.Adam(robust_net.parameters(), lr=1e-3)
        ts = []
        for i in range(10):
            start_time = time.time()
            if PROJ:
                robust_ce, robust_err = robust_loss(robust_net, epsilon, X, y,
                                                    parallel=False, l1_proj=50,
                                                    l1_type='median')
            else:
                robust_ce, robust_err = robust_loss(robust_net, epsilon, X, y,
                                                    parallel=False)
            # Also track the standard (non-robust) loss and error.
            out = robust_net(X)
            l2 = nn.CrossEntropyLoss()(out, y).item()
            err = (out.max(1)[1] != y).float().mean().item()
            data.append([l2, robust_ce.item(), err, robust_err])
            opt.zero_grad()
            robust_ce.backward()
            opt.step()
            end_time = time.time()
            ts.append(end_time - start_time)
            end_mem = gpu_mem()
            mems.add(end_mem)
            # print(start_mem, end_mem)
        del robust_net, robust_ce, l2, robust_err, err, out, opt
        # Discard the first (warm-up) iteration when averaging the time.
        ts = np.array(ts[1:])
        # Record the hidden-layer size: j channels on a 28x28 input.
        xs.add(j * 28 * 28)
        ys.add(ts.mean())
        print(j * 28 * 28, ts.mean(), end_mem)
        mems.next()
        xs.next()
        ys.next()
    except RuntimeError:
        # Out of GPU memory: stop the sweep and save what we have.
        break
if PROJ:
    xs.save('sizes_conv_proj.txt')
    ys.save('epoch_conv_times_proj.txt')
    mems.save('memory_conv_proj.txt')
else:
    xs.save('sizes_conv_full.txt')
    ys.save('epoch_conv_times_full.txt')
    mems.save('memory_conv_full.txt')
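# A minimal sketch of how the saved results could be inspected afterwards
# (assumes matplotlib is available; file names match those written above):
#
#   sizes = np.loadtxt('sizes_conv_proj.txt')
#   times = np.loadtxt('epoch_conv_times_proj.txt')
#   import matplotlib.pyplot as plt
#   plt.plot(sizes.ravel(), times.ravel(), 'o')
#   plt.xlabel('hidden units')
#   plt.ylabel('seconds per iteration')
#   plt.show()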