Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Gaussian Blur Fix #34

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
Open
153 changes: 153 additions & 0 deletions src/algos/complex_nn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
from algos.simba_algo import SimbaDefence
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
from models.complex_models import Discriminator, RealToComplex, ComplexToReal, ResNetEncoderComplex, ResNetDecoderComplex

def get_encoder_output_size(encoder, dims):
    """Return the per-sample output shape of ``encoder`` for input shape ``dims``.

    Runs a single random batch of size 1 through the encoder under
    ``torch.no_grad()`` and strips the leading batch dimension.

    Args:
        encoder: callable (typically an ``nn.Module``) mapping a tensor of
            shape ``(1, *dims)`` to a tensor, or to a tuple whose first
            element is the tensor of interest.
        dims: per-sample input shape, e.g. ``(C, H, W)``; any sequence of
            ints is accepted (generalized from tuple-only).

    Returns:
        list[int]: output shape without the batch dimension.
    """
    probe = torch.randn((1,) + tuple(dims))
    with torch.no_grad():
        out = encoder(probe)
    # Some encoders return (features, aux); keep only the features.
    if isinstance(out, tuple):
        out = out[0]
    return list(out.size())[1:]

class ComplexNN(SimbaDefence):
    """Complex-valued split-learning defence.

    The client encodes the input, lifts the activation into the complex
    domain via a random rotation (``RealToComplex``), and adversarially
    trains a discriminator so rotated activations are indistinguishable
    from raw ones (WGAN-style, with weight clipping).  Generator
    (encoder/decoder) and discriminator updates alternate on the parity of
    ``self.optimizer_idx``.
    """

    def __init__(self, config, utils) -> None:
        super(ComplexNN, self).__init__(utils)
        self.initialize(config)

    def initialize(self, config):
        """Build models, optimizers and logging tags from ``config``."""
        # Even optimizer_idx -> generator step, odd -> discriminator step.
        self.optimizer_idx = 0
        self.encoder_model, self.decoder_model = self.init_client_model(config)
        img_size = config["img_size"]
        size = get_encoder_output_size(self.encoder_model,
                                       (3, img_size, img_size))
        self.discriminator = Discriminator(size=size)
        models = [self.encoder_model, self.decoder_model, self.discriminator]
        # Re-assign so any wrapper returned by model_on_gpus (e.g.
        # DataParallel) is the object actually registered and trained.
        # The original discarded the returned models.
        (self.encoder_model,
         self.decoder_model,
         self.discriminator) = self.put_on_gpus(models)

        self.utils.register_model("encoder_model", self.encoder_model)
        self.utils.register_model("discriminator_model", self.discriminator)
        self.utils.register_model("decoder_model", self.decoder_model)
        (self.optim_encoder,
         self.optim_decoder,
         self.optim_discriminator) = self.init_optim(
            config, self.encoder_model, self.decoder_model,
            self.discriminator)

        self.real_to_complex = RealToComplex()
        self.complex_to_real = ComplexToReal()
        self.loss_fn = F.cross_entropy
        self.alpha = config["alpha"]
        # Number of random rotations averaged for the discriminator score.
        self.k = config["k"]

        self.loss_tag = "decoder_loss"
        self.acc_tag = "decoder_acc"
        for tag in (self.loss_tag, self.acc_tag):
            self.utils.logger.register_tag("train/" + tag)
            self.utils.logger.register_tag("val/" + tag)

    def put_on_gpus(self, models):
        """Move each model onto the available GPU(s).

        Returns:
            list: the (possibly wrapped) moved models, in input order.
        """
        return [self.utils.model_on_gpus(model) for model in models]

    def init_client_model(self, config):
        """Instantiate the complex encoder/decoder pair named in ``config``.

        Raises:
            ValueError: for an unsupported ``model_name`` (replaces the
                original ``print`` + ``exit()`` so callers can handle it).
        """
        if config["model_name"] == "resnet20complex":
            encoder_model = ResNetEncoderComplex(3)
            decoder_model = ResNetDecoderComplex(3, config["logits"], "alpha")
        else:
            raise ValueError(
                "can't find complex client model {}".format(
                    config["model_name"]))
        return encoder_model, decoder_model

    def init_optim(self, config, encoder, decoder, discriminator):
        """Create the (encoder, decoder, discriminator) optimizers.

        Raises:
            ValueError: for an unsupported optimizer name.  The original
                printed a message and then returned unbound locals, which
                crashed with ``UnboundLocalError``.
        """
        if config["optimizer"] != "adam":
            raise ValueError(
                "Unknown optimizer {}".format(config["optimizer"]))
        optimizer_e = torch.optim.Adam(encoder.parameters(),
                                       lr=config["lr"])
        # NOTE(review): the decoder optimizer uses Adam's default lr, not
        # config["lr"], unlike the other two — kept as-is; confirm intended.
        optimizer_decoder = torch.optim.Adam(decoder.parameters())
        optimizer_discriminator = torch.optim.Adam(
            discriminator.parameters(),
            lr=config["lr"],
        )
        return optimizer_e, optimizer_decoder, optimizer_discriminator

    def train(self):
        """Put client models into training mode."""
        self.mode = "train"
        self.encoder_model.train()
        self.decoder_model.train()

    def eval(self):
        """Put client models into evaluation mode."""
        self.mode = "val"
        self.encoder_model.eval()
        self.decoder_model.eval()

    def forward(self, items):
        """Encode a batch and return the detached complex activation.

        Also accumulates the discriminator's average score over ``self.k``
        freshly rotated copies of the activation, used later by both the
        generator and discriminator losses in ``infer``.
        """
        inp = items["x"]
        a = self.encoder_model(inp)
        self.a = a
        # Shuffle the batch to pair each sample with a random partner that
        # determines its rotation.
        with torch.no_grad():
            indices = np.random.permutation(a.size(0))
            b = a[indices]

        self.z, self.theta = self.real_to_complex(a, b)

        # Discriminator score expectation over k random rotations.
        self.score_fake = 0
        for _ in range(self.k):
            indices = np.random.permutation(a.size(0))
            b = a[indices]
            x, _ = self.real_to_complex(a, b)
            a_rotated = x[:, 0]
            self.score_fake += self.discriminator(a_rotated)
        self.score_fake /= self.k  # average score

        # Detach so server gradients re-enter through backward().
        z = self.z.detach()
        z.requires_grad = True
        return z

    def infer(self, h, labels):
        """Decode server activations, log accuracy, run one GAN half-step.

        On even ``optimizer_idx`` performs the generator (decoder) update
        and returns the gradient w.r.t. ``h`` for the server; on odd
        indices performs the WGAN discriminator update and returns None.
        """
        h.retain_grad()
        y = self.complex_to_real(h, self.theta)
        y.retain_grad()
        self.preds = self.decoder_model(y)
        self.acc = (self.preds.argmax(dim=1) ==
                    labels).sum().item() / self.preds.shape[0]
        self.utils.logger.add_entry(self.mode + "/" + self.acc_tag, self.acc)
        if self.optimizer_idx % 2 == 0:
            # Generator step: adversarial term + task cross-entropy.
            g_loss_adv = -torch.mean(self.score_fake)
            g_loss_ce = self.loss_fn(self.preds, labels)
            loss = g_loss_adv + g_loss_ce
            self.optim_decoder.zero_grad()
            # retain_graph: the same graph is reused by backward() for the
            # encoder update.
            loss.backward(retain_graph=True)
            self.optim_decoder.step()
            self.utils.logger.add_entry(self.mode + "/" + self.loss_tag,
                                        loss.item())
            return h.grad
        else:
            # WGAN weight clipping keeps the critic approximately Lipschitz.
            for p in self.discriminator.parameters():
                p.data.clamp_(-0.01, 0.01)
            self.d_loss_adv = (-torch.mean(self.discriminator(self.a))
                               + torch.mean(self.score_fake))
            self.optim_discriminator.zero_grad()
            self.d_loss_adv.backward()
            self.optim_discriminator.step()
            return None

    def backward(self, items):
        """Propagate server gradients into the encoder on generator steps."""
        if self.optimizer_idx % 2 == 0:
            self.optim_encoder.zero_grad()
            self.z.backward(items["server_grads"])
            self.optim_encoder.step()
        # Alternate generator/discriminator roles on the next iteration.
        self.optimizer_idx += 1


2 changes: 1 addition & 1 deletion src/algos/gaussian_blur.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ class GaussianSmoothing(nn.Module):
def __init__(self, channels, kernel_size, sigma, device, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
kernel_size = [kernel_size*math.sqrt(sigma)] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim

Expand Down
39 changes: 38 additions & 1 deletion src/algos/maxentropy.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
from algos.simba_algo import SimbaDefence
import torch
from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
torch.autograd.set_detect_anomaly(True)
distance = nn.CrossEntropyLoss()
from algos.deepobfuscator import DeepObfuscator
from utils.metrics import MetricLoader

Expand All @@ -12,6 +20,35 @@ def forward(self, input):
raise Exception('Entropy Loss takes probabilities 0<=input<=1')

input = input + 1e-16 # for numerical stability while taking log
H = torch.mean(torch.sum(input * torch.log(input), dim=0))

return H

class MaxEntropy(SimbaDefence):
def __init__(self, config, utils) -> None:
super(MaxEntropy, self).__init__(utils)
self.initialize(config, utils.device)

def initialize(self, config, device):
self.client_model = self.init_client_model(config)
self.put_on_gpus()
self.utils.register_model("client_model", self.client_model)
self.client_optim = self.init_optim(config, self.client_model)
self.entropy_loss_fn = EntropyLoss()

def forward(self, items):
x = items["x"]
self.z = self.client_model(x)
z = self.z.detach()
z.requires_grad = True
return z

def backward(self, items):
entropy_loss = self.entropy_loss_fn(items["pred_lbls"])
entropy_loss.requires_grad = True
entropy_loss.backward()
self.z.backward(items["server_grads"])
self.client_optim.step()
H = torch.mean(torch.sum(input * torch.log(input), dim=1))

return H
Expand All @@ -31,4 +68,4 @@ def update_loss(self):

def get_adv_loss(self):
# Since it is L1, it has to be minimized
return self.adv_loss
return self.adv_loss
6 changes: 3 additions & 3 deletions src/algos/simba_algo.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,9 @@ def init_optim(self, config, model):
def put_on_gpus(self):
self.client_model = self.utils.model_on_gpus(self.client_model)

def infer(self,data,labels):
    # Default no-op inference hook: the base defence does nothing here;
    # subclasses that need a local inference/update step override it
    # (e.g. ComplexNN.infer in the same PR).
    pass


class SimbaAttack(nn.Module):
def __init__(self, utils):
Expand Down Expand Up @@ -101,6 +104,3 @@ def train(self):
def eval(self):
self.mode = "val"
self.model.eval()



4 changes: 4 additions & 0 deletions src/algos/supervised_decoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ def forward(self, items):
self.loss = self.loss_fn(self.x, x)
self.utils.logger.add_entry(self.mode + "/" + self.loss_tag,
self.loss.item())

return self.x

def backward(self, items):
if self.mode == "val" and self.attribute == "data":
prefix = "val/"

Expand Down
20 changes: 20 additions & 0 deletions src/configs/complex_nn.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
{
"experiment_type": "challenge",
"method": "complex_nn",
"client": {"model_name": "resnet20complex", "split_layer": 6,
"pretrained": false,"logits": 2, "optimizer": "adam", "lr": 3e-4,
"alpha": 0.99, "k":5, "img_size":32},
"server": {"model_name": "resnet20complex", "split_layer":6, "logits": 2, "pretrained": false,
"lr": 3e-4, "optimizer": "adam", "momentum": 0.99},
"learning_rate": 0.1,
"total_epochs": 150,
"training_batch_size": 128,
"dataset": "fairface",
"protected_attribute": "data",
"prediction_attribute": "gender",
"img_size": 32,
"split": false,
"test_batch_size": 32,
"exp_id": "1",
"exp_keys": ["client.alpha","client.optimizer"]
}
2 changes: 1 addition & 1 deletion src/configs/decoder_attack.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@
"train_split": 0.9,
"test_batch_size": 64,
"exp_keys": ["train_split", "adversary.loss_fn"]
}
}
2 changes: 1 addition & 1 deletion src/configs/deep_obfuscator.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.alpha"]
}
}
18 changes: 18 additions & 0 deletions src/configs/maxentropy.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"method": "maxentropy",
"client": {"model_name": "resnet18", "split_layer": 6,
"pretrained": false, "optimizer": "adam", "lr": 3e-4},
"server": {"model_name": "resnet18", "split_layer":6, "logits": 2, "pretrained": false,
"lr": 3e-4, "optimizer": "adam"},
"learning_rate": 0.01,
"total_epochs": 150,
"training_batch_size": 256,
"dataset": "fairface",
"protected_attribute": "data",
"prediction_attribute": "gender",
"img_size": 128,
"split": false,
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.optimizer"]
}
2 changes: 1 addition & 1 deletion src/configs/nopeek.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.alpha"]
}
}
3 changes: 1 addition & 2 deletions src/configs/pan.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
{
"experiment_type": "challenge",
"method": "pan",
"client": {"model_name": "resnet18", "split_layer": 6,
"pretrained": false, "optimizer": "adam", "lr": 3e-4,
Expand All @@ -18,4 +17,4 @@
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.alpha"]
}
}
2 changes: 1 addition & 1 deletion src/configs/siamese_embedding.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.alpha", "client.margin"]
}
}
2 changes: 1 addition & 1 deletion src/configs/uniform_noise.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
"test_batch_size": 64,
"exp_id": "1",
"exp_keys": ["client.distribution", "client.mean", "client.sigma"]
}
}
1 change: 1 addition & 0 deletions src/data/loaders.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import numpy as np
import torch
from torchvision import transforms
from data.dataset_utils import FairFace,Cifar10, CelebA, Cifar10_2, LFW#, UTKFace
from data.dataset_utils import FairFace, CelebA, Cifar10, LFW#, UTKFace, Cifar10_2
from data.dataset_utils import Challenge

Expand Down
7 changes: 6 additions & 1 deletion src/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,10 @@
from algos.pca_embedding import PCAEmbedding
from algos.deepobfuscator import DeepObfuscator
from algos.pan import PAN
from algos.complex_nn import ComplexNN
from algos.gaussian_blur import GaussianBlur
from algos.linear_correlation import LinearCorrelation

from algos.maxentropy import MaxEntropy
from algos.supervised_decoder import SupervisedDecoder
from algos.cloak import Cloak
from algos.shredder import Shredder
Expand Down Expand Up @@ -46,6 +47,8 @@ def load_algo(config, utils, dataloader=None):
algo = UniformNoise(config["client"], utils)
elif method == "siamese_embedding":
algo = SiameseEmbedding(config["client"], utils)
elif method == "complex_nn":
algo = ComplexNN(config["client"], utils)
elif method == "pca_embedding":
algo = PCAEmbedding(config["client"], utils)
elif method == "deep_obfuscator":
Expand All @@ -62,6 +65,8 @@ def load_algo(config, utils, dataloader=None):
algo = GaussianBlur(config["client"], utils)
elif method == "linear_correlation":
algo = LinearCorrelation(config["client"], utils)
elif method == "maxentropy":
algo = MaxEntropy(config["client"], utils)
elif method == "supervised_decoder":
item = next(iter(dataloader))
z = item["z"]
Expand Down
1 change: 0 additions & 1 deletion src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,3 @@

scheduler = Scheduler(args)
scheduler.run_job()

Loading