-
-
Notifications
You must be signed in to change notification settings - Fork 4
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Initial support for generative attacks
- Loading branch information
1 parent
6e3838d
commit 066ac8f
Showing
8 changed files
with
307 additions
and
7 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,65 @@ | ||
import torch | ||
|
||
from torchattack.generative._inference import GenerativeAttack | ||
from torchattack.generative._weights import Weights, WeightsEnum | ||
from torchattack.generative.resnet_generator import ResNetGenerator | ||
|
||
|
||
class CDAWeights(WeightsEnum):
    """Pretrained CDA generator checkpoints.

    Each member wraps the download URL of a generator state dict; member names
    presumably encode the surrogate model the generator was trained against
    and the dataset (e.g. ResNet-152 on ImageNet-1k) — confirm against the
    release assets.
    """

    RESNET152_IMAGENET1K = Weights(
        url='https://github.com/spencerwooo/torchattack/releases/download/v1.0-weights/cda_res152_imagenet_0_rl.pth',
    )
    INCEPTION_V3_IMAGENET1K = Weights(
        url='https://github.com/spencerwooo/torchattack/releases/download/v1.0-weights/cda_incv3_imagenet_0_rl.pth',
    )
    VGG16_IMAGENET1K = Weights(
        url='https://github.com/spencerwooo/torchattack/releases/download/v1.0-weights/cda_vgg16_imagenet_0_rl.pth',
    )
    VGG19_IMAGENET1K = Weights(
        url='https://github.com/spencerwooo/torchattack/releases/download/v1.0-weights/cda_vgg19_imagenet_0_rl.pth',
    )
    # Reusing an existing value makes DEFAULT an *alias* of RESNET152_IMAGENET1K
    # (Enum alias semantics), not a fifth member.
    DEFAULT = RESNET152_IMAGENET1K
|
||
|
||
class CDA(GenerativeAttack):
    """Cross-domain Attack (CDA).

    From the paper 'Cross-Domain Transferability of Adversarial Perturbations',
    https://arxiv.org/abs/1905.11736

    Args:
        device: Device to use for tensors. Defaults to cuda if available.
        eps: The maximum perturbation. Defaults to 10/255.
        weights: Pretrained weights for the generator. Defaults to CDAWeights.DEFAULT.
        checkpoint_path: Path to a local generator checkpoint; takes precedence
            over `weights` when provided. Defaults to None.
        clip_min: Minimum value for clipping. Defaults to 0.0.
        clip_max: Maximum value for clipping. Defaults to 1.0.
    """

    def __init__(
        self,
        device: torch.device | None = None,
        eps: float = 10 / 255,
        weights: CDAWeights | str | None = CDAWeights.DEFAULT,
        checkpoint_path: str | None = None,
        clip_min: float = 0.0,
        clip_max: float = 1.0,
    ) -> None:
        super().__init__(device, eps, weights, checkpoint_path, clip_min, clip_max)

    def _init_generator(self) -> ResNetGenerator:
        """Build the ResNet generator and load its pretrained weights.

        Returns:
            The generator in eval mode, moved to `self.device`.
        """
        generator = ResNetGenerator()
        # Prioritize checkpoint path over provided weights enum
        if self.checkpoint_path is not None:
            # map_location keeps CPU-only hosts working even when the
            # checkpoint was serialized from CUDA tensors.
            generator.load_state_dict(
                torch.load(self.checkpoint_path, map_location=self.device)
            )
        else:
            # Verify and load weights from enum if checkpoint path is not provided.
            # Note: verify() may return None, so no CDAWeights annotation here.
            self.weights = CDAWeights.verify(self.weights)
            if self.weights is not None:
                generator.load_state_dict(self.weights.get_state_dict(check_hash=True))
        return generator.eval().to(self.device)
|
||
|
||
if __name__ == '__main__':
    # Smoke test: build the attack with VGG-19 weights and show its repr.
    run_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cda_attack = CDA(run_device, eps=8 / 255, weights='VGG19_IMAGENET1K')
    print(cda_attack)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,49 @@ | ||
from abc import abstractmethod | ||
from typing import Any | ||
|
||
import torch | ||
|
||
from torchattack._attack import Attack | ||
from torchattack.generative._weights import WeightsEnum | ||
|
||
|
||
class GenerativeAttack(Attack):
    """Base class for attacks that perturb images via a pretrained generator.

    Subclasses implement `_init_generator` to construct and load their
    generator network; the constructor builds it eagerly.

    Args:
        device: Device to use for tensors. Defaults to cuda if available.
        eps: The maximum perturbation. Defaults to 10/255.
        weights: Pretrained weights for the generator. Defaults to None.
        checkpoint_path: Path to a local generator checkpoint. Defaults to None.
        clip_min: Minimum value for clipping. Defaults to 0.0.
        clip_max: Maximum value for clipping. Defaults to 1.0.
    """

    def __init__(
        self,
        device: torch.device | None = None,
        eps: float = 10 / 255,
        weights: WeightsEnum | str | None = None,
        checkpoint_path: str | None = None,
        clip_min: float = 0.0,
        clip_max: float = 1.0,
    ) -> None:
        # Generative attacks do not require specifying model and normalize.
        super().__init__(model=None, normalize=None, device=device)

        self.eps = eps
        self.weights = weights
        self.checkpoint_path = checkpoint_path
        self.clip_min = clip_min
        self.clip_max = clip_max

        # Initialize the generator and its weights
        self.generator = self._init_generator()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Perform the generative attack via generator inference on a batch of images.

        Args:
            x: A batch of images. Shape: (N, C, H, W).

        Returns:
            The perturbed images if successful. Shape: (N, C, H, W).
        """
        raw_adv = self.generator(x)
        # Project the generator output back into the eps-ball around x,
        # then clip into the valid pixel range.
        bounded = x + torch.clamp(raw_adv - x, -self.eps, self.eps)
        return torch.clamp(bounded, self.clip_min, self.clip_max)

    @abstractmethod
    def _init_generator(self, *args: Any, **kwds: Any) -> Any:
        pass
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,34 @@ | ||
from dataclasses import dataclass | ||
from enum import Enum | ||
from typing import Any, Mapping | ||
|
||
from torch.hub import load_state_dict_from_url | ||
|
||
|
||
@dataclass
class Weights:
    """Lightweight record holding the download URL of a pretrained checkpoint."""

    # Direct URL to the state-dict file, consumed via torch.hub in WeightsEnum.
    url: str
|
||
|
||
class WeightsEnum(Enum):
    """Enum base class for pretrained weights, mirroring torchvision's weights API."""

    @classmethod
    def verify(cls, obj: Any) -> Any:
        """Resolve `obj` into a member of this enum.

        Args:
            obj: An enum member, a member name as a string (optionally prefixed
                with the enum class name, e.g. 'CDAWeights.DEFAULT'), or None.

        Returns:
            The matching enum member, or None if `obj` is None.

        Raises:
            TypeError: If `obj` is neither None, a string, nor a member of `cls`.
            KeyError: If `obj` is a string that names no member of `cls`.
        """
        if obj is not None:
            if isinstance(obj, str):
                # Strip only a leading 'ClassName.' qualifier; str.replace would
                # also mangle the class name appearing elsewhere in the string.
                obj = cls[obj.removeprefix(cls.__name__ + '.')]
            elif not isinstance(obj, cls):
                raise TypeError(
                    f'Invalid Weight class provided; expected {cls.__name__} '
                    f'but received {obj.__class__.__name__}.'
                )
        return obj

    def get_state_dict(self, *args: Any, **kwargs: Any) -> Mapping[str, Any]:
        """Download (or load from the torch.hub cache) this member's state dict."""
        return load_state_dict_from_url(self.url, *args, **kwargs)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}.{self._name_}'

    @property
    def url(self) -> str:
        # Each member's value is a `Weights` dataclass carrying the download URL.
        return self.value.url
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,144 @@ | ||
import torch | ||
import torch.nn as nn | ||
|
||
# To control feature map in generator | ||
ngf = 64 | ||
|
||
|
||
class ResNetGenerator(nn.Module):
    """Generator network (ResNet).

    Downsamples twice, applies six residual blocks, then upsamples back to the
    input resolution. Attribute names and Sequential layouts must stay fixed:
    pretrained checkpoints are keyed on them.

    Args:
        inception: if True crop layer will be added to go from 3x300x300 to
            3x299x299. Defaults to False.
    """

    def __init__(self, inception=False):
        super().__init__()

        def down_block(c_in, c_out):
            # Stride-2 conv halves the spatial resolution.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            )

        def up_block(c_in, c_out):
            # Transposed conv doubles the spatial resolution.
            return nn.Sequential(
                nn.ConvTranspose2d(
                    c_in,
                    c_out,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    output_padding=1,
                    bias=False,
                ),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            )

        self.inception = inception

        # Stem: reflection-padded 7x7 conv, 3 -> ngf channels, resolution kept.
        self.block1 = nn.Sequential(
            nn.ReflectionPad2d(3),
            nn.Conv2d(3, ngf, kernel_size=7, padding=0, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
        )

        # Two downsampling stages: n -> n/2 -> n/4.
        self.block2 = down_block(ngf, ngf * 2)
        self.block3 = down_block(ngf * 2, ngf * 4)

        # Six residual blocks at n/4 resolution.
        self.resblock1 = ResidualBlock(ngf * 4)
        self.resblock2 = ResidualBlock(ngf * 4)
        self.resblock3 = ResidualBlock(ngf * 4)
        self.resblock4 = ResidualBlock(ngf * 4)
        self.resblock5 = ResidualBlock(ngf * 4)
        self.resblock6 = ResidualBlock(ngf * 4)

        # Two upsampling stages: n/4 -> n/2 -> n.
        self.upsampl1 = up_block(ngf * 4, ngf * 2)
        self.upsampl2 = up_block(ngf * 2, ngf)

        # Head: reflection-padded 7x7 conv back to 3 channels.
        self.blockf = nn.Sequential(
            nn.ReflectionPad2d(3), nn.Conv2d(ngf, 3, kernel_size=7, padding=0)
        )

        # Negative padding trims one row/column (300x300 -> 299x299 for inception).
        self.crop = nn.ConstantPad2d((0, -1, -1, 0), 0)

    def forward(self, input):
        out = input
        for stage in (
            self.block1,
            self.block2,
            self.block3,
            self.resblock1,
            self.resblock2,
            self.resblock3,
            self.resblock4,
            self.resblock5,
            self.resblock6,
            self.upsampl1,
            self.upsampl2,
            self.blockf,
        ):
            out = stage(out)
        if self.inception:
            out = self.crop(out)
        # Map tanh's [-1, 1] into the image range [0, 1].
        return (torch.tanh(out) + 1) / 2
|
||
|
||
class ResidualBlock(nn.Module):
    """Residual block: two reflection-padded 3x3 convs with BN, plus identity.

    The Sequential layout must stay fixed — pretrained checkpoints are keyed
    on its indices.
    """

    def __init__(self, num_filters):
        super().__init__()

        def conv3x3(channels):
            # Padding is handled by the preceding ReflectionPad2d, hence padding=0.
            return nn.Conv2d(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                stride=1,
                padding=0,
                bias=False,
            )

        self.block = nn.Sequential(
            nn.ReflectionPad2d(1),
            conv3x3(num_filters),
            nn.BatchNorm2d(num_filters),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.ReflectionPad2d(1),
            conv3x3(num_filters),
            nn.BatchNorm2d(num_filters),
        )

    def forward(self, x):
        return x + self.block(x)
|
||
|
||
if __name__ == '__main__':
    # Smoke test: run a random 32x32 batch through the generator and
    # report the output shape and trainable parameter count.
    model = ResNetGenerator()
    sample = torch.rand(1, 3, 32, 32)
    print('Generator output size:', model(sample).size())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Generator params:', trainable)