This repository was archived by the owner on Jan 26, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathvat.py
More file actions
executable file
·52 lines (38 loc) · 1.91 KB
/
vat.py
File metadata and controls
executable file
·52 lines (38 loc) · 1.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import tensorflow as tf
# import sys; sys.path.append("../")
from u_net import create_conv_net
epsilon = .1  # magnitude of the final virtual adversarial perturbation (VAT "eps")
num_power_iterations = 1  # power-iteration steps used to approximate the worst-case direction
xi = 1e-6  # finite-difference step size used inside each power iteration
def entropy_min(logit):
    """Conditional-entropy regularizer: mean (negated) entropy of the
    softmax distribution, with the inner sum taken over axes 1 and 2."""
    probs = tf.nn.softmax(logit)
    per_example = tf.reduce_sum(probs * logsoftmax(logit), [1, 2])
    return -tf.reduce_mean(per_example)
def logsoftmax(x):
    """Numerically stable log-softmax computed over axes 1 and 2.

    Subtracting the per-example max before exponentiating prevents
    overflow in tf.exp.
    """
    shifted = x - tf.reduce_max(x, [1, 2], keepdims=True)
    log_normalizer = tf.log(tf.reduce_sum(tf.exp(shifted), [1, 2], keepdims=True))
    return shifted - log_normalizer
def get_normalized_vector(d):
    """Return d L2-normalized over axes 1 and 2.

    The tensor is first divided by its per-example max magnitude so the
    squared sum in the second step cannot overflow; the small additive
    constants guard against division by zero.
    """
    max_scaled = d / (1e-12 + tf.reduce_max(tf.abs(d), axis=[1, 2], keepdims=True))
    l2_norm = tf.sqrt(1e-6 + tf.reduce_sum(tf.pow(max_scaled, 2.0), axis=[1, 2], keepdims=True))
    return max_scaled / l2_norm
def generate_virtual_adversarial_perturbation(x, logit, is_training=True, alpha=None):
    """Approximate the virtual adversarial perturbation r_vadv by power iteration.

    Starting from random noise, repeatedly (num_power_iterations times)
    measures how much a tiny step xi*d changes the network output and takes
    the gradient of that change w.r.t. d as the new direction.

    x       -- input batch fed to create_conv_net; shape taken via tf.shape(x)
    logit   -- clean-input network output used as the comparison target
    alpha   -- forwarded to create_conv_net (semantics defined there)
    Returns epsilon * (normalized adversarial direction), same shape as x.
    """
    d = tf.random_normal(shape=tf.shape(x))
    for _ in range(num_power_iterations):
        # Tiny finite-difference step in the current direction.
        d = xi * get_normalized_vector(d)
        logit_p = logit
        # Re-run the network on the perturbed input.
        # NOTE(review): the constants (.9, 3, 9, features_root=64) mirror the
        # call in virtual_adversarial_loss below -- confirm against create_conv_net.
        logit_m, _ = create_conv_net(x + d, .9, 3, 9, is_training=is_training, features_root=64, alpha=alpha)
        # MSE between clean and perturbed outputs is the divergence being maximized.
        dist = tf.losses.mean_squared_error(logit_p, logit_m)
        # Gradient w.r.t. d gives the locally most-sensitive direction.
        grad = tf.gradients(dist, [d], aggregation_method=2)[0]
        d = tf.stop_gradient(grad)
    return epsilon * get_normalized_vector(d)
def virtual_adversarial_loss(x, logit, is_training=True, name="vat_loss", alpha=None):
    """VAT loss: MSE between the network's output on x and on x + r_vadv.

    x       -- input batch
    logit   -- network output on the clean input x
    alpha   -- forwarded to create_conv_net
    Returns a scalar loss tensor named `name`.

    Fix: the original call passed keyword arguments ``dist_=dist`` and
    ``bs=bs`` -- both names are undefined here (NameError) and
    generate_virtual_adversarial_perturbation accepts no such parameters
    (TypeError). They are removed.
    """
    r_vadv = generate_virtual_adversarial_perturbation(x, logit, is_training=is_training, alpha=alpha)
    # Treat the clean prediction as a fixed target so only the perturbed
    # branch contributes gradients to the model weights.
    logit = tf.stop_gradient(logit)
    logit_p = logit
    logit_m, _ = create_conv_net(x + r_vadv, .9, 3, 9, is_training=is_training, features_root=64, alpha=alpha)
    loss = tf.losses.mean_squared_error(logit_p, logit_m)
    return tf.identity(loss, name=name)