From 2678626591eca2b61f79f9cc2206eb3f702d9bac Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 19 Apr 2023 18:15:09 +0000 Subject: [PATCH 01/55] initial commit Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/__init__.py | 1 + .../smoothed_vision_transformers/pytorch.py | 77 +++++++++++++++++++ .../smooth_vit.py | 20 +++++ 3 files changed, 98 insertions(+) create mode 100644 art/estimators/certification/smoothed_vision_transformers/__init__.py create mode 100644 art/estimators/certification/smoothed_vision_transformers/pytorch.py create mode 100644 art/estimators/certification/smoothed_vision_transformers/smooth_vit.py diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers/__init__.py new file mode 100644 index 0000000000..c421f11a28 --- /dev/null +++ b/art/estimators/certification/smoothed_vision_transformers/__init__.py @@ -0,0 +1 @@ +from smooth_vit import PyTorchSmoothedViT \ No newline at end of file diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py new file mode 100644 index 0000000000..6f6012289f --- /dev/null +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -0,0 +1,77 @@ +from __future__ import absolute_import, division, print_function, unicode_literals + +import logging +from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING +import random + +import numpy as np +from tqdm import tqdm + +from art.estimators.classification.pytorch import PyTorchClassifier +from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + + +class PyTorchSmoothedViT(PyTorchClassifier): + def __init__( + self, + model: "torch.nn.Module", + loss: "torch.nn.modules.loss._Loss", + input_shape: Tuple[int, ...], + nb_classes: int, + ablation_type: str, + ablation_size: int, + threshold: float, + logits: bool, + optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + channels_first: bool = True, + clip_values: Optional["CLIP_VALUES_TYPE"] = None, + preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, + postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), + device_type: str = "gpu", + ): + """ + Create a smoothed ViT classifier. + + :param model: PyTorch model. The output of the model can be logits, probabilities or anything else. Logits + output should be preferred where possible to ensure attack efficiency. + :param loss: The loss function for which to compute gradients for training. The target label must be raw + categorical, i.e. not converted to one-hot encoding. + :param input_shape: The shape of one input instance. + :param nb_classes: The number of classes of the model. + :param ablation_type: The type of ablation to perform, must be either "column" or "block" + :param ablation_size: The size of the data portion to retain after ablation. Will be a column of size N for + "column" ablation type or a NxN square for ablation of type "block" + :param threshold: The minimum threshold to count a prediction. + :param logits: if the model returns logits or normalized probabilities + :param optimizer: The optimizer used to train the classifier. + :param channels_first: Set channels first or last. 
+ :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. + :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. + :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be + used for data preprocessing. The first value will be subtracted from the input. The input will then + be divided by the second one. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. + """ + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + logits=logits, + ) + + self.ablation_type = ablation_type + self.ablation_size = ablation_size, + self.threshold = threshold \ No newline at end of file diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py new file mode 100644 index 0000000000..6ea62a146b --- /dev/null +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -0,0 +1,20 @@ + +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link: https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf +""" +import torch as nn + +class ColumnAblator(nn.Module): + """ + Pure Pytorch implementation of stripe/column ablation. 
+ """ + def __init__(self, ablation_size: int, channels_first: bool, row_ablation_mode: bool = False): + super().__init__() + self.ablation_size = ablation_size + self.channels_first = channels_first + self.row_ablation_mode = row_ablation_mode + + def forward(self): + raise NotImplementedError From 198349bc69a62b994cdcce4c8cc64b1e9fa422a4 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 24 Apr 2023 10:55:55 +0000 Subject: [PATCH 02/55] train and upsample methods Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/__init__.py | 2 +- .../smoothed_vision_transformers/pytorch.py | 112 +++++++++++++++++- .../smooth_vit.py | 36 +++++- dev.py | 91 ++++++++++++++ 4 files changed, 232 insertions(+), 9 deletions(-) create mode 100644 dev.py diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers/__init__.py index c421f11a28..fd2b959474 100644 --- a/art/estimators/certification/smoothed_vision_transformers/__init__.py +++ b/art/estimators/certification/smoothed_vision_transformers/__init__.py @@ -1 +1 @@ -from smooth_vit import PyTorchSmoothedViT \ No newline at end of file +from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT \ No newline at end of file diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 6f6012289f..0859d9496d 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -9,6 +9,9 @@ from art.estimators.classification.pytorch import PyTorchClassifier from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator +from art.utils import check_and_transform_label_format + +logger = logging.getLogger(__name__) class PyTorchSmoothedViT(PyTorchClassifier): @@ -69,9 +72,114 @@ def __init__( postprocessing_defences=postprocessing_defences, preprocessing=preprocessing, device_type=device_type, - logits=logits, ) self.ablation_type = ablation_type self.ablation_size = ablation_size, - self.threshold = threshold \ No newline at end of file + self.threshold = threshold + self.logits = logits + + print(self.model) + self.ablator = ColumnAblator(ablation_size=ablation_size, + channels_first=True, + row_ablation_mode=False) + + def fit( # pylint: disable=W0221 + self, + x: np.ndarray, + y: np.ndarray, + batch_size: int = 128, + nb_epochs: int = 10, + training_mode: bool = True, + drop_last: bool = False, + scheduler: Optional[Any] = None, + verbose=True, + **kwargs, + ) -> None: + """ + Fit the classifier on the training set `(x, y)`. + :param x: Training data. + :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of + shape (nb_samples,). + :param batch_size: Size of batches. + :param nb_epochs: Number of epochs to use for training. + :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by + the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then + the last batch will be smaller. (default: ``False``) + :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param kwargs: Dictionary of framework-specific arguments. 
This parameter is not currently supported for PyTorch + and providing it takes no effect. + """ + import torch + + # Set model mode + self._model.train(mode=training_mode) + + if self._optimizer is None: # pragma: no cover + raise ValueError("An optimizer is needed to train the model, but none for provided.") + + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = len(x_preprocessed) / float(batch_size) + if drop_last: + num_batch = int(np.floor(num_batch)) + else: + num_batch = int(np.ceil(num_batch)) + ind = np.arange(len(x_preprocessed)) + + # Start training + for _ in tqdm(range(nb_epochs)): + # Shuffle the examples + random.shuffle(ind) + pbar = tqdm(range(num_batch), disable=not verbose) + + # Train for one epoch + for m in pbar: + i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size: (m + 1) * batch_size]])).to(self._device) + i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + + o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size: (m + 1) * batch_size]]).to( + self._device) + + # Zero the parameter gradients + self._optimizer.zero_grad() + + # Perform prediction + try: + model_outputs = self._model(i_batch) + except ValueError as err: + if "Expected more than 1 value per channel when training" in str(err): + logger.exception( + "Try dropping the last incomplete batch by setting drop_last=True in " + "method PyTorchClassifier.fit." + ) + raise err + # Form the loss function + loss = self._loss(model_outputs[-1], o_batch) + + # Do training + if self._use_amp: # pragma: no cover + from apex import amp # pylint: disable=E0611 + + with amp.scale_loss(loss, self._optimizer) as scaled_loss: + scaled_loss.backward() + + else: + loss.backward() + + self._optimizer.step() + + if verbose: + pbar.set_description( + f"Loss {loss}:.2f" + ) + + if scheduler is not None: + scheduler.step() diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index 6ea62a146b..d8dd2c5398 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -1,12 +1,24 @@ - """ This module implements Certified Patch Robustness via Smoothed Vision Transformers -| Paper link: https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ -import torch as nn +import torch + + +class UpSampler(torch.nn.Module): + def __init__(self, input_size, final_size): + super(UpSampler, self).__init__() + self.upsample = torch.nn.Upsample(scale_factor=final_size/input_size) -class ColumnAblator(nn.Module): + def forward(self, x): + return self.upsample(x) + + +class ColumnAblator(torch.nn.Module): """ Pure Pytorch implementation of stripe/column ablation. 
""" @@ -15,6 +27,18 @@ def __init__(self, ablation_size: int, channels_first: bool, row_ablation_mode: self.ablation_size = ablation_size self.channels_first = channels_first self.row_ablation_mode = row_ablation_mode + self.upsample = UpSampler(input_size=32, final_size=224) + + def ablate(self, x, column_pos): + k = self.ablation_size + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1]:column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k:] = 0.0 + return x - def forward(self): - raise NotImplementedError + def forward(self, x, column_pos): + x = self.ablate(x, column_pos=column_pos) + x = self.upsample(x) + return x diff --git a/dev.py b/dev.py new file mode 100644 index 0000000000..625095a95f --- /dev/null +++ b/dev.py @@ -0,0 +1,91 @@ + +# https://github.com/huggingface/pytorch-image-models +import torch +import torch.nn as nn + +from timm.models.vision_transformer import VisionTransformer, vit_small_patch16_224 +from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT +import copy +import numpy as np +from torchvision import datasets +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +model = vit_small_patch16_224(pretrained=True) +print(type(model)) + + +def get_cifar_data(): + """ + Get CIFAR-10 data. + :return: cifar train/test data. + """ + train_set = datasets.CIFAR10('./data', train=True, download=True) + test_set = datasets.CIFAR10('./data', train=False, download=True) + + x_train = train_set.data.astype(np.float32) + y_train = np.asarray(train_set.targets) + + x_test = test_set.data.astype(np.float32) + y_test = np.asarray(test_set.targets) + + x_train = np.moveaxis(x_train, [3], [1]) + x_test = np.moveaxis(x_test, [3], [1]) + + x_train = x_train / 255.0 + x_test = x_test / 255.0 + + return (x_train, y_train), (x_test, y_test) + + +def update_batchnorm(model, x): + import random + from tqdm import tqdm + + art_model.model.train() + batch_size = 32 + + ind = np.arange(len(x)) + num_batch = int(len(x) / float(batch_size)) + + print('updating batchnorm') + with torch.no_grad(): + for _ in tqdm(range(200)): + for m in tqdm(range(num_batch)): + i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size: (m + 1) * batch_size]])).to(device) + i_batch = art_model.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + art_model.model(i_batch.cuda()) + return model + +(x_train, y_train), (x_test, y_test) = get_cifar_data() +x_test = torch.from_numpy(x_test) +print('params: ', model.parameters()) +optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9) +scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1) + +# Use same initial point as Madry +checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth", + map_location="cpu", check_hash=True +) +model.load_state_dict(checkpoint["model"]) + +art_model = PyTorchSmoothedViT(model=model, + loss=torch.nn.CrossEntropyLoss(), + input_shape=(3, 224, 224), + optimizer=optimizer, + nb_classes=10, + ablation_type='column', + ablation_size=4, + threshold=0.01, + logits=True) + +ablated_x = art_model.ablator.ablate(x=copy.deepcopy(x_test[:32]), + column_pos=1) + +print('test position 31') + +ablated_x = art_model.ablator.ablate(x=copy.deepcopy(x_test[:32]), + column_pos=31) + +art_model = update_batchnorm(art_model, x_train) +art_model.fit(x_train, y_train) From 114b696c91f7edd575a6ba15a4b927c6b529408d Mon 
Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sat, 6 May 2023 14:56:52 +0000 Subject: [PATCH 03/55] adding certification method to smoothed_vit Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/__init__.py | 2 +- .../smoothed_vision_transformers/smooth_vit.py | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers/__init__.py index fd2b959474..fc9a45bb4f 100644 --- a/art/estimators/certification/smoothed_vision_transformers/__init__.py +++ b/art/estimators/certification/smoothed_vision_transformers/__init__.py @@ -1 +1 @@ -from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT \ No newline at end of file +from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT, ArtViT \ No newline at end of file diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index d8dd2c5398..d871c54158 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -39,6 +39,20 @@ def ablate(self, x, column_pos): return x def forward(self, x, column_pos): + assert x.shape[1] == 3 + ones = torch.torch.ones_like(x[:, 0:1, :, :]).cuda() + x = torch.cat([x, ones], dim=1) x = self.ablate(x, column_pos=column_pos) - x = self.upsample(x) - return x + return self.upsample(x) + + def certify(self, predictions, size_to_certify, label): + + num_of_classes = predictions.shape[-1] + + top_class_counts, top_predicted_class = predictions.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = predictions.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) + cert_and_correct = cert & label == top_predicted_class + + return cert, cert_and_correct From cda2e300197fb8f96657f5ada03cf7a9c95a2bf0 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 10 May 2023 14:58:15 +0000 Subject: [PATCH 04/55] initial ViT functionality Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 257 +++++++++++++++++- 1 file changed, 251 insertions(+), 6 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 0859d9496d..1835844640 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -1,8 +1,13 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging +import sys from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random +import torch +import copy + +from timm.models.vision_transformer import VisionTransformer import numpy as np from tqdm import tqdm @@ -12,6 +17,125 @@ from art.utils import check_and_transform_label_format logger = logging.getLogger(__name__) +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +class PatchEmbed(torch.nn.Module): + """ Image to Patch Embedding + + Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit + + Original License: + + MIT License + + Copyright (c) 2021 Madry Lab + + Permission is hereby granted, 
free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + num_patches = (img_size // patch_size) * (img_size // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = torch.nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=False) + w_shape = self.proj.weight.shape + self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) + + def forward(self, x): + with torch.no_grad(): + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class ArtViT(VisionTransformer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def loop_drop_tokens(self, x, indexes): + # print('applying masked_select') + # print('x ', x.shape) + # print('indexes ', indexes.shape) + x_no_cl, cls_token = x[:, 1:], x[:, 0:1] + + # loop for now: change later to be batched + x_selected = [] + for sample, i in zip(x_no_cl, indexes): + i = (i == True).nonzero(as_tuple=True)[0] + # print('indexes ', i) + # print('indexes shape', i.shape) + tmp = torch.index_select(sample, dim=0, index=i) + # print('tmp ', tmp.shape) + x_selected.append(tmp) + + x_selected = torch.stack(x_selected, dim=0) + # print('x dropped ', x_selected.shape) + # print('cls_token ', cls_token.shape) + return torch.cat((cls_token, x_selected), dim=1) + + def batch_drop_tokens(self, x, indexes): + x_no_cl, cls_token = x[:, 1:], x[:, 0:1] + shape = x_no_cl.shape + + # reshape to temporarily remove batch + x_no_cl = torch.reshape(x_no_cl, shape=(-1, shape[-1])) + indexes = torch.reshape(indexes, shape=(-1,)) + indexes = (indexes == True).nonzero(as_tuple=True)[0] + + x_no_cl = torch.index_select(x_no_cl, dim=0, index=indexes) + x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) + return torch.cat((cls_token, x_no_cl), dim=1) + + def forward_features(self, x): + """ + The forward pass of the ViT. 
+ + """ + drop_tokens = True + + if x.shape[1] == 4: + x, ablation_mask = x[:, :3], x[:, 3:4] + + x = self.patch_embed(x) + x = self._pos_embed(x) + + if drop_tokens: + ablation_mask_embedder = PatchEmbed(in_chans=1) + ones = ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + + check_i = indexes[0] + check_val = to_drop[0] + for i, s in zip(indexes, to_drop): + if not torch.equal(check_i, i): + for ci, ei, val, cval in zip(check_i, i, s, check_val): + print(f'{ci} with {cval} vs {ei} with {val}') + sys.exit() + + x = self.batch_drop_tokens(x, indexes) + + x = self.blocks(x) + return self.norm(x) class PyTorchSmoothedViT(PyTorchClassifier): @@ -36,8 +160,7 @@ def __init__( """ Create a smoothed ViT classifier. - :param model: PyTorch model. The output of the model can be logits, probabilities or anything else. Logits - output should be preferred where possible to ensure attack efficiency. + :param model: string specifying which ViT architecture to load :param loss: The loss function for which to compute gradients for training. The target label must be raw categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. @@ -60,6 +183,15 @@ def __init__( be divided by the second one. :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. """ + import timm + from timm.models.vision_transformer import VisionTransformer, vit_small_patch16_224 + timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer + model = vit_small_patch16_224() + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + + # TODO: enable users to pass in opt hyperparameters + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001) + super().__init__( model=model, loss=loss, @@ -84,6 +216,46 @@ def __init__( channels_first=True, row_ablation_mode=False) + @staticmethod + def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: + """ + Creates a vision transformer using ArtViT which controls the forward pass of the model + + :param variant: The name of the vision transformer to load + :param pretrained: If to load pre-trained weights + """ + from timm.models._builder import build_model_with_cfg + from timm.models.vision_transformer import checkpoint_filter_fn + return build_model_with_cfg( + ArtViT, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) + + def update_batchnorm(self, x: np.ndarray, batch_size: int) -> None: + """ + Method to update the batchnorm of a ViT on small datasets + :param x: + :param batch_size: Size of batches. 
+ """ + import random + import time + + self.model.train() + + ind = np.arange(len(x)) + num_batch = int(len(x) / float(batch_size)) + + print('updating batchnorm') + s = time.time() + with torch.no_grad(): + for _ in tqdm(range(1)): + for m in tqdm(range(num_batch)): + i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size: (m + 1) * batch_size]])).to(device) + i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + _ = self.model(i_batch) + print('total time taken is ', time.time() - s) + def fit( # pylint: disable=W0221 self, x: np.ndarray, @@ -93,7 +265,8 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional[Any] = None, - verbose=True, + update_batchnorm: bool = True, + verbose: bool = True, **kwargs, ) -> None: """ @@ -108,6 +281,8 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param update_batchnorm: ... + :param verbose: ... :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ @@ -124,6 +299,9 @@ def fit( # pylint: disable=W0221 # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + if update_batchnorm: + self.update_batchnorm(x_preprocessed, batch_size) + # Check label shape y_preprocessed = self.reduce_labels(y_preprocessed) @@ -138,6 +316,9 @@ def fit( # pylint: disable=W0221 for _ in tqdm(range(nb_epochs)): # Shuffle the examples random.shuffle(ind) + epoch_acc = [] + epoch_loss = [] + pbar = tqdm(range(num_batch), disable=not verbose) # Train for one epoch @@ -153,7 +334,7 @@ def fit( # pylint: disable=W0221 # Perform prediction try: - model_outputs = self._model(i_batch) + model_outputs = self.model(i_batch) except ValueError as err: if "Expected more than 1 value per channel when training" in str(err): logger.exception( @@ -162,7 +343,11 @@ def fit( # pylint: disable=W0221 ) raise err # Form the loss function - loss = self._loss(model_outputs[-1], o_batch) + # print('the model outputs are ', model_outputs.shape) + loss = self.loss(model_outputs, o_batch) + acc = self.get_accuracy(preds=model_outputs, labels=o_batch) + epoch_acc.append(acc) + epoch_loss.append(loss) # Do training if self._use_amp: # pragma: no cover @@ -178,8 +363,68 @@ def fit( # pylint: disable=W0221 if verbose: pbar.set_description( - f"Loss {loss}:.2f" + f"Loss {torch.mean(torch.stack(epoch_loss)):.2f}" + f" Acc {np.mean(epoch_acc):.2f}" ) if scheduler is not None: scheduler.step() + + def certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): + # set to eval + # self._model.train(mode=training_mode) + drop_last = True + verbose = True + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = len(x_preprocessed) / float(batch_size) + if drop_last: + num_batch = int(np.floor(num_batch)) + else: + num_batch = int(np.ceil(num_batch)) + pbar = tqdm(range(num_batch), disable=not verbose) + + with torch.no_grad(): + for m in pbar: + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size: (m + 1) * batch_size])).to(self._device) + o_batch = 
torch.from_numpy(y_preprocessed[m * batch_size: (m + 1) * batch_size]).to(self._device) + predictions = [] + pred_counts = torch.zeros((batch_size, 10)).to(self._device) + for pos in range(0, 30): + # print(i_batch.shape) + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + + # Perform prediction + model_outputs = self.model(ablated_batch) + # print(model_outputs.argmax(dim=-1)) + # print(model_outputs.argmax(dim=-1).shape) + pred_counts[np.arange(0, batch_size), model_outputs.argmax(dim=-1)] += 1 + + predictions.append(model_outputs) + + cert, cert_and_correct = self.ablator.certify(pred_counts, size_to_certify=5, label=o_batch) + print(torch.sum(cert)) + print(torch.sum(cert_and_correct) / batch_size) + + @staticmethod + def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + """ + Helper function to print out the accuracy during training + + :param preds: (concrete) model predictions + :param labels: ground truth labels (not one hot) + :return: prediction accuracy + """ + if isinstance(preds, torch.Tensor): + preds = preds.detach().cpu().numpy() + + if isinstance(labels, torch.Tensor): + labels = labels.detach().cpu().numpy() + + return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) \ No newline at end of file From c2f38df28fac91642ba1289bf4e07664eb1189ba Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 10 May 2023 15:13:20 +0000 Subject: [PATCH 05/55] dev testing script Signed-off-by: GiulioZizzo --- dev.py | 56 ++++++++------------------------------------------------ 1 file changed, 8 insertions(+), 48 deletions(-) diff --git a/dev.py b/dev.py index 625095a95f..fb30887b53 100644 --- a/dev.py +++ b/dev.py @@ -1,18 +1,15 @@ - -# https://github.com/huggingface/pytorch-image-models import torch import torch.nn as nn -from timm.models.vision_transformer import VisionTransformer, vit_small_patch16_224 -from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT +from timm.models.vision_transformer import VisionTransformer +from timm.models.vision_transformer import checkpoint_filter_fn +from functools import partial +from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT, ArtViT import copy import numpy as np from torchvision import datasets device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model = vit_small_patch16_224(pretrained=True) -print(type(model)) - def get_cifar_data(): """ @@ -37,55 +34,18 @@ def get_cifar_data(): return (x_train, y_train), (x_test, y_test) -def update_batchnorm(model, x): - import random - from tqdm import tqdm - - art_model.model.train() - batch_size = 32 - - ind = np.arange(len(x)) - num_batch = int(len(x) / float(batch_size)) - - print('updating batchnorm') - with torch.no_grad(): - for _ in tqdm(range(200)): - for m in tqdm(range(num_batch)): - i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size: (m + 1) * batch_size]])).to(device) - i_batch = art_model.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - art_model.model(i_batch.cuda()) - return model - (x_train, y_train), (x_test, y_test) = get_cifar_data() x_test = torch.from_numpy(x_test) -print('params: ', model.parameters()) -optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9) -scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1) - -# Use same initial point as Madry -checkpoint = torch.hub.load_state_dict_from_url( - 
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth", - map_location="cpu", check_hash=True -) -model.load_state_dict(checkpoint["model"]) -art_model = PyTorchSmoothedViT(model=model, +art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', loss=torch.nn.CrossEntropyLoss(), input_shape=(3, 224, 224), - optimizer=optimizer, nb_classes=10, ablation_type='column', ablation_size=4, threshold=0.01, logits=True) -ablated_x = art_model.ablator.ablate(x=copy.deepcopy(x_test[:32]), - column_pos=1) - -print('test position 31') - -ablated_x = art_model.ablator.ablate(x=copy.deepcopy(x_test[:32]), - column_pos=31) - -art_model = update_batchnorm(art_model, x_train) -art_model.fit(x_train, y_train) +scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) +art_model.fit(x_train, y_train, update_batchnorm=True, scheduler=scheduler) +art_model.certify(x_train, y_train) From 394e49ce2cb8683d0e2029fb5c3c0581fe4e95ce Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 10 May 2023 20:53:05 +0100 Subject: [PATCH 06/55] adding evaluation and load pretrained with optimizer updates Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 148 ++++++++++++------ .../smooth_vit.py | 5 +- dev.py | 14 +- 3 files changed, 107 insertions(+), 60 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 1835844640..66474e8de5 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -56,8 +56,22 @@ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches + self.in_chans = in_chans + self.embed_dim = embed_dim + + def create(self, patch_size=None, embed_dim=None, **kwargs): + + if patch_size is not None: + self.patch_size = patch_size + if embed_dim is not None: + self.embed_dim = embed_dim + + self.proj = torch.nn.Conv2d(in_channels=1, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False) - self.proj = torch.nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=False) w_shape = self.proj.weight.shape self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) @@ -68,31 +82,23 @@ def forward(self, x): class ArtViT(VisionTransformer): + + # Make as a class attribute to avoid being included in the + # state dictionaries of the ViT Model. 
+ ablation_mask_embedder = PatchEmbed(in_chans=1) + def __init__(self, **kwargs): super().__init__(**kwargs) + self.ablation_mask_embedder.create(**kwargs) - def loop_drop_tokens(self, x, indexes): - # print('applying masked_select') - # print('x ', x.shape) - # print('indexes ', indexes.shape) - x_no_cl, cls_token = x[:, 1:], x[:, 0:1] - - # loop for now: change later to be batched - x_selected = [] - for sample, i in zip(x_no_cl, indexes): - i = (i == True).nonzero(as_tuple=True)[0] - # print('indexes ', i) - # print('indexes shape', i.shape) - tmp = torch.index_select(sample, dim=0, index=i) - # print('tmp ', tmp.shape) - x_selected.append(tmp) - - x_selected = torch.stack(x_selected, dim=0) - # print('x dropped ', x_selected.shape) - # print('cls_token ', cls_token.shape) - return torch.cat((cls_token, x_selected), dim=1) - - def batch_drop_tokens(self, x, indexes): + @staticmethod + def drop_tokens(x, indexes): + """ + Drops the tokens which correspond to fully masked inputs + :param x: Input data in .... format + :param indexes: positions to be ablated + return + """ x_no_cl, cls_token = x[:, 1:], x[:, 0:1] shape = x_no_cl.shape @@ -108,6 +114,9 @@ def batch_drop_tokens(self, x, indexes): def forward_features(self, x): """ The forward pass of the ViT. + #TODO! check for 1 channel inputs! + + :param x: Input data. """ drop_tokens = True @@ -119,8 +128,7 @@ def forward_features(self, x): x = self._pos_embed(x) if drop_tokens: - ablation_mask_embedder = PatchEmbed(in_chans=1) - ones = ablation_mask_embedder(ablation_mask) + ones = self.ablation_mask_embedder(ablation_mask) to_drop = torch.sum(ones, dim=2) indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) @@ -132,7 +140,7 @@ def forward_features(self, x): print(f'{ci} with {cval} vs {ei} with {val}') sys.exit() - x = self.batch_drop_tokens(x, indexes) + x = self.drop_tokens(x, indexes) x = self.blocks(x) return self.norm(x) @@ -141,26 +149,28 @@ def forward_features(self, x): class PyTorchSmoothedViT(PyTorchClassifier): def __init__( self, - model: "torch.nn.Module", + model: ["VisionTransformer", str], loss: "torch.nn.modules.loss._Loss", input_shape: Tuple[int, ...], nb_classes: int, ablation_type: str, ablation_size: int, threshold: float, - logits: bool, optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + optimizer_params: Optional[dict] = None, channels_first: bool = True, clip_values: Optional["CLIP_VALUES_TYPE"] = None, preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", + load_pretrained: bool = True, ): """ Create a smoothed ViT classifier. - :param model: string specifying which ViT architecture to load + :param model: Either a string specifying which ViT architecture to load, or a vision transformer already + created with the Pytorch Image Models (timm) library. :param loss: The loss function for which to compute gradients for training. The target label must be raw categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. @@ -169,7 +179,6 @@ def __init__( :param ablation_size: The size of the data portion to retain after ablation. Will be a column of size N for "column" ablation type or a NxN square for ablation of type "block" :param threshold: The minimum threshold to count a prediction. 
- :param logits: if the model returns logits or normalized probabilities :param optimizer: The optimizer used to train the classifier. :param channels_first: Set channels first or last. :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and @@ -184,13 +193,35 @@ def __init__( :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. """ import timm - from timm.models.vision_transformer import VisionTransformer, vit_small_patch16_224 + timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer - model = vit_small_patch16_224() - model.head = torch.nn.Linear(model.head.in_features, nb_classes) - # TODO: enable users to pass in opt hyperparameters - optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001) + if type(model) is str: + model = timm.create_model(model, pretrained=load_pretrained) + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + # TODO: enable users to pass in opt hyperparameters + # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001) + optimizer = optimizer(model.parameters(), **optimizer_params) + + else: + pretrained_cfg = model.pretrained_cfg + supplied_state_dict = model.state_dict() + model = timm.create_model(pretrained_cfg['vit_small_patch16_224'], pretrained=load_pretrained) + model.load_state_dict(torch.load(supplied_state_dict)) + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + + if optimizer is not None: + converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] + opt_state_dict = optimizer.state_dict() + if isinstance(optimizer, torch.optim.Adam): + logging.info("Converting Adam Optimiser") + converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) + elif isinstance(optimizer, torch.optim.SGD): + logging.info("Converting SGD Optimiser") + converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) + else: + raise ValueError("Optimiser not supported for conversion") + converted_optimizer.load_state_dict(opt_state_dict) super().__init__( model=model, @@ -209,7 +240,6 @@ def __init__( self.ablation_type = ablation_type self.ablation_size = ablation_size, self.threshold = threshold - self.logits = logits print(self.model) self.ablator = ColumnAblator(ablation_size=ablation_size, @@ -223,7 +253,9 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar :param variant: The name of the vision transformer to load :param pretrained: If to load pre-trained weights + :return: A ViT with the required methods needed for ART """ + from timm.models._builder import build_model_with_cfg from timm.models.vision_transformer import checkpoint_filter_fn return build_model_with_cfg( @@ -294,7 +326,14 @@ def fit( # pylint: disable=W0221 if self._optimizer is None: # pragma: no cover raise ValueError("An optimizer is needed to train the model, but none for provided.") + import torchvision.transforms as transforms + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + transform = transforms.Compose( + [ + transforms.RandomHorizontalFlip() + ] + ) # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) @@ -324,6 +363,7 @@ def fit( # pylint: disable=W0221 # Train for one epoch for m in pbar: i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size: (m + 1) * batch_size]])).to(self._device) + i_batch = transform(i_batch) i_batch = 
self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size: (m + 1) * batch_size]]).to( @@ -359,7 +399,7 @@ def fit( # pylint: disable=W0221 else: loss.backward() - self._optimizer.step() + self.optimizer.step() if verbose: pbar.set_description( @@ -370,9 +410,15 @@ def fit( # pylint: disable=W0221 if scheduler is not None: scheduler.step() - def certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): - # set to eval - # self._model.train(mode=training_mode) + def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): + """ + Evaluates the ViT's normal and certified performance over the supplied data + :param x: Evaluation data + :param y: Evaluation labels + :param batch_size: batch size when evaluating + """ + + self.model.eval() drop_last = True verbose = True y = check_and_transform_label_format(y, nb_classes=self.nb_classes) @@ -389,28 +435,30 @@ def certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): else: num_batch = int(np.ceil(num_batch)) pbar = tqdm(range(num_batch), disable=not verbose) - + accuracy = [] + cert_acc = [] with torch.no_grad(): for m in pbar: i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size: (m + 1) * batch_size])).to(self._device) o_batch = torch.from_numpy(y_preprocessed[m * batch_size: (m + 1) * batch_size]).to(self._device) predictions = [] - pred_counts = torch.zeros((batch_size, 10)).to(self._device) - for pos in range(0, 30): - # print(i_batch.shape) + pred_counts = torch.zeros((batch_size, self.nb_classes)).to(self._device) + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction model_outputs = self.model(ablated_batch) - # print(model_outputs.argmax(dim=-1)) - # print(model_outputs.argmax(dim=-1).shape) pred_counts[np.arange(0, batch_size), model_outputs.argmax(dim=-1)] += 1 - predictions.append(model_outputs) - cert, cert_and_correct = self.ablator.certify(pred_counts, size_to_certify=5, label=o_batch) - print(torch.sum(cert)) - print(torch.sum(cert_and_correct) / batch_size) + cert, cert_and_correct, top_predicted_class = self.ablator.certify(pred_counts, size_to_certify=4, label=o_batch) + cert_acc.append(torch.sum(cert_and_correct) / batch_size) + acc = torch.sum(top_predicted_class == o_batch) / batch_size + accuracy.append(acc) + + print('Normal Acc: ', torch.mean(torch.stack(accuracy))) + print('Cert Normal Acc: ', torch.mean(torch.stack(cert_acc))) @staticmethod def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index d871c54158..b634393a93 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -53,6 +53,7 @@ def certify(self, predictions, size_to_certify, label): second_class_counts, second_predicted_class = predictions.kthvalue(num_of_classes - 1, dim=1) cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - cert_and_correct = cert & label == top_predicted_class - return cert, cert_and_correct + cert_and_correct = cert & (label == top_predicted_class) + + return cert, cert_and_correct, top_predicted_class diff --git a/dev.py b/dev.py index fb30887b53..48c5a932b9 100644 
--- a/dev.py +++ b/dev.py @@ -1,11 +1,6 @@ import torch -import torch.nn as nn -from timm.models.vision_transformer import VisionTransformer -from timm.models.vision_transformer import checkpoint_filter_fn -from functools import partial from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT, ArtViT -import copy import numpy as np from torchvision import datasets device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -44,8 +39,11 @@ def get_cifar_data(): ablation_type='column', ablation_size=4, threshold=0.01, - logits=True) + logits=True, + load_pretrained=True) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -art_model.fit(x_train, y_train, update_batchnorm=True, scheduler=scheduler) -art_model.certify(x_train, y_train) +art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler) +torch.save(art_model.model.state_dict(), 'trained.pt') +art_model.model.load_state_dict(torch.load('trained.pt')) +art_model.eval_and_certify(x_train, y_train) From 7dd0512a5b75a911ca7144a4f39d709a93596963 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 17 May 2023 10:54:34 +0100 Subject: [PATCH 07/55] updating input specification Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 75 +++++++++++------ .../smooth_vit.py | 81 ++++++++++++++++--- requirements_test.txt | 3 + 3 files changed, 123 insertions(+), 36 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 66474e8de5..31ae221688 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -50,23 +50,35 @@ class PatchEmbed(torch.nn.Module): SOFTWARE. """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + def __init__(self, patch_size=16, in_channels=1, embed_dim=768): + """ + Specifies the configuration for the convolutional layer. + :param patch_size: The patch size used by the ViT + :param in_chans: Number of input channels. + :param embed_dim: The embedding dimension used by the ViT + + """ super().__init__() - num_patches = (img_size // patch_size) * (img_size // patch_size) - self.img_size = img_size self.patch_size = patch_size - self.num_patches = num_patches - self.in_chans = in_chans + self.in_channels = in_channels self.embed_dim = embed_dim + self.proj: Optional[torch.nn.Conv2d] = None - def create(self, patch_size=None, embed_dim=None, **kwargs): + def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: + """ + Creates a convolution that mimics the embedding layer to be used for the ablation mask to + track where the image was ablated. 
+ + :param patch_size: The patch size used by the ViT + :param embed_dim: The embedding dimension used by the ViT + """ if patch_size is not None: self.patch_size = patch_size if embed_dim is not None: self.embed_dim = embed_dim - self.proj = torch.nn.Conv2d(in_channels=1, + self.proj = torch.nn.Conv2d(in_channels=self.in_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, @@ -75,7 +87,14 @@ def create(self, patch_size=None, embed_dim=None, **kwargs): w_shape = self.proj.weight.shape self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass through the embedder. We are simply tracking the positions of the ablation mask so no gradients + are required. + + :param x: Input data corresponding to the ablation mask + :return: The embedded input + """ with torch.no_grad(): x = self.proj(x).flatten(2).transpose(1, 2) return x @@ -85,19 +104,20 @@ class ArtViT(VisionTransformer): # Make as a class attribute to avoid being included in the # state dictionaries of the ViT Model. - ablation_mask_embedder = PatchEmbed(in_chans=1) + ablation_mask_embedder = PatchEmbed(in_channels=1) def __init__(self, **kwargs): super().__init__(**kwargs) self.ablation_mask_embedder.create(**kwargs) @staticmethod - def drop_tokens(x, indexes): + def drop_tokens(x: torch.Tensor, indexes) -> torch.Tensor: """ Drops the tokens which correspond to fully masked inputs + :param x: Input data in .... format :param indexes: positions to be ablated - return + :return: Input with tokens dropped where the input was fully ablated. """ x_no_cl, cls_token = x[:, 1:], x[:, 0:1] shape = x_no_cl.shape @@ -111,7 +131,7 @@ def drop_tokens(x, indexes): x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) return torch.cat((cls_token, x_no_cl), dim=1) - def forward_features(self, x): + def forward_features(self, x: torch.Tensor) -> torch.Tensor: """ The forward pass of the ViT. #TODO! check for 1 channel inputs! @@ -199,9 +219,11 @@ def __init__( if type(model) is str: model = timm.create_model(model, pretrained=load_pretrained) model.head = torch.nn.Linear(model.head.in_features, nb_classes) - # TODO: enable users to pass in opt hyperparameters - # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001) - optimizer = optimizer(model.parameters(), **optimizer_params) + if optimizer is not None: + if optimizer_params is not None: + optimizer = optimizer(model.parameters(), **optimizer_params) + else: + raise ValueError("If providing an optimiser please also supply its parameters") else: pretrained_cfg = model.pretrained_cfg @@ -223,6 +245,13 @@ def __init__( raise ValueError("Optimiser not supported for conversion") converted_optimizer.load_state_dict(opt_state_dict) + self.to_reshape = False + if model.default_cfg['input_size'] != input_shape: + print(f"ViT expects input shape of {model.default_cfg['input_size']}, " + f"but {input_shape} specified as the input shape. 
" + f"The input will be rescaled to {model.default_cfg['input_size']}") + self.to_reshape = True + super().__init__( model=model, loss=loss, @@ -244,7 +273,9 @@ def __init__( print(self.model) self.ablator = ColumnAblator(ablation_size=ablation_size, channels_first=True, - row_ablation_mode=False) + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=model.default_cfg['input_size']) @staticmethod def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: @@ -264,14 +295,14 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar **kwargs, ) - def update_batchnorm(self, x: np.ndarray, batch_size: int) -> None: + def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: """ Method to update the batchnorm of a ViT on small datasets - :param x: + :param x: Training data. :param batch_size: Size of batches. + :param nb_epochs: How many times to forward pass over the input data """ import random - import time self.model.train() @@ -279,14 +310,12 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int) -> None: num_batch = int(len(x) / float(batch_size)) print('updating batchnorm') - s = time.time() with torch.no_grad(): - for _ in tqdm(range(1)): + for _ in tqdm(range(nb_epochs)): for m in tqdm(range(num_batch)): i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size: (m + 1) * batch_size]])).to(device) i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) _ = self.model(i_batch) - print('total time taken is ', time.time() - s) def fit( # pylint: disable=W0221 self, @@ -475,4 +504,4 @@ def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndar if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy() - return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) \ No newline at end of file + return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index b634393a93..1c15cc1e7f 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -8,13 +8,32 @@ """ import torch +from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + class UpSampler(torch.nn.Module): - def __init__(self, input_size, final_size): + """ + Resizes datasets to the specified size. + Usually for upscaling datasets like CIFAR to Imagenet format + """ + def __init__(self, input_size: int, final_size: int) -> None: + """ + Creates an upsampler to make the supplied data match the pre-trained ViT format + :param input_size: Size of the current input data + :param final_size: Desired final size + """ super(UpSampler, self).__init__() self.upsample = torch.nn.Upsample(scale_factor=final_size/input_size) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass though the upsampler. + + :param x: Input data + :return: The upsampled input data + """ return self.upsample(x) @@ -22,14 +41,32 @@ class ColumnAblator(torch.nn.Module): """ Pure Pytorch implementation of stripe/column ablation. 
""" - def __init__(self, ablation_size: int, channels_first: bool, row_ablation_mode: bool = False): + def __init__(self, ablation_size: int, channels_first: bool, + to_reshape: bool = False, original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None): + """ + Creates a column ablator + + :param ablation_size: The size of the column we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param to_reshape: If the input requires reshaping. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + """ super().__init__() self.ablation_size = ablation_size self.channels_first = channels_first - self.row_ablation_mode = row_ablation_mode - self.upsample = UpSampler(input_size=32, final_size=224) + self.to_reshape = to_reshape + self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x, column_pos): + def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + """ + Ablates the input colum wise + + :param x: Input data + :param column_pos: The start position of the albation + :return: The ablated input with 0s where the ablation occurred + """ k = self.ablation_size if column_pos + k > x.shape[-1]: x[:, :, :, (column_pos + k) % x.shape[-1]:column_pos] = 0.0 @@ -38,19 +75,37 @@ def ablate(self, x, column_pos): x[:, :, :, column_pos + k:] = 0.0 return x - def forward(self, x, column_pos): + def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + """ + Forward pass though the ablator. We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :return: The albated input with an extra channel indicating the location of the ablation + """ assert x.shape[1] == 3 - ones = torch.torch.ones_like(x[:, 0:1, :, :]).cuda() + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(device) x = torch.cat([x, ones], dim=1) x = self.ablate(x, column_pos=column_pos) - return self.upsample(x) + if self.to_reshape: + x = self.upsample(x) + return x + + def certify(self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions - def certify(self, predictions, size_to_certify, label): + :param pred_counts: The model predictions over the ablated data. 
+ :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input + """ - num_of_classes = predictions.shape[-1] + num_of_classes = pred_counts.shape[-1] - top_class_counts, top_predicted_class = predictions.kthvalue(num_of_classes, dim=1) - second_class_counts, second_predicted_class = predictions.kthvalue(num_of_classes - 1, dim=1) + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) diff --git a/requirements_test.txt b/requirements_test.txt index 7346645fd6..87add272e9 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -34,6 +34,9 @@ torch==1.13.1 torchaudio==0.13.1+cpu torchvision==0.14.1+cpu +# PyTorch image transformers +timm @ git+https://github.com/huggingface/pytorch-image-models.git@9fcc01930aae865ec9ef8aae8849ca2ba241f816 + catboost==1.1.1 GPy==1.10.0 lightgbm==3.3.5 From 548dafde5eb1b82196487fb9f608d000af9fb75b Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 19 May 2023 16:11:28 +0100 Subject: [PATCH 08/55] updating test script Signed-off-by: GiulioZizzo --- dev.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/dev.py b/dev.py index 48c5a932b9..28c828a5c0 100644 --- a/dev.py +++ b/dev.py @@ -1,5 +1,6 @@ import torch - +import ssl +ssl._create_default_https_context = ssl._create_unverified_context from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT, ArtViT import numpy as np from torchvision import datasets @@ -34,16 +35,17 @@ def get_cifar_data(): art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', loss=torch.nn.CrossEntropyLoss(), - input_shape=(3, 224, 224), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), nb_classes=10, ablation_type='column', ablation_size=4, threshold=0.01, - logits=True, load_pretrained=True) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler) -torch.save(art_model.model.state_dict(), 'trained.pt') -art_model.model.load_state_dict(torch.load('trained.pt')) +# art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler) +# torch.save(art_model.model.state_dict(), 'trained.pt') +# art_model.model.load_state_dict(torch.load('trained.pt')) art_model.eval_and_certify(x_train, y_train) From d46102126c5f41b56a58a9e892f43954857ffa76 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 22 May 2023 12:56:03 +0000 Subject: [PATCH 09/55] more flexible inputs, and fixing mypy errors Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 212 +++++++++++------- .../smooth_vit.py | 45 +++- 2 files changed, 161 insertions(+), 96 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 31ae221688..173399d1ba 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -1,3 
+1,28 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf +""" from __future__ import absolute_import, division, print_function, unicode_literals import logging @@ -5,7 +30,6 @@ from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random import torch -import copy from timm.models.vision_transformer import VisionTransformer @@ -16,12 +40,17 @@ from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator from art.utils import check_and_transform_label_format +if TYPE_CHECKING: + from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE + from art.defences.preprocessor import Preprocessor + from art.defences.postprocessor import Postprocessor + logger = logging.getLogger(__name__) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class PatchEmbed(torch.nn.Module): - """ Image to Patch Embedding + """Image to Patch Embedding Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit @@ -50,11 +79,12 @@ class PatchEmbed(torch.nn.Module): SOFTWARE. """ - def __init__(self, patch_size=16, in_channels=1, embed_dim=768): + + def __init__(self, patch_size: int=16, in_channels: int=1, embed_dim:int=768): """ Specifies the configuration for the convolutional layer. :param patch_size: The patch size used by the ViT - :param in_chans: Number of input channels. + :param in_channels: Number of input channels. 
:param embed_dim: The embedding dimension used by the ViT """ @@ -78,11 +108,13 @@ def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: if embed_dim is not None: self.embed_dim = embed_dim - self.proj = torch.nn.Conv2d(in_channels=self.in_channels, - out_channels=self.embed_dim, - kernel_size=self.patch_size, - stride=self.patch_size, - bias=False) + self.proj = torch.nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) w_shape = self.proj.weight.shape self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) @@ -95,13 +127,14 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: :param x: Input data corresponding to the ablation mask :return: The embedded input """ - with torch.no_grad(): - x = self.proj(x).flatten(2).transpose(1, 2) - return x + if self.proj is not None: + with torch.no_grad(): + x = self.proj(x).flatten(2).transpose(1, 2) + return x + raise ValueError("Projection layer not yet created.") class ArtViT(VisionTransformer): - # Make as a class attribute to avoid being included in the # state dictionaries of the ViT Model. ablation_mask_embedder = PatchEmbed(in_channels=1) @@ -151,15 +184,6 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: ones = self.ablation_mask_embedder(ablation_mask) to_drop = torch.sum(ones, dim=2) indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) - - check_i = indexes[0] - check_val = to_drop[0] - for i, s in zip(indexes, to_drop): - if not torch.equal(check_i, i): - for ci, ei, val, cval in zip(check_i, i, s, check_val): - print(f'{ci} with {cval} vs {ei} with {val}') - sys.exit() - x = self.drop_tokens(x, indexes) x = self.blocks(x) @@ -169,14 +193,14 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: class PyTorchSmoothedViT(PyTorchClassifier): def __init__( self, - model: ["VisionTransformer", str], + model: Union[VisionTransformer, str], loss: "torch.nn.modules.loss._Loss", input_shape: Tuple[int, ...], nb_classes: int, ablation_type: str, ablation_size: int, threshold: float, - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + optimizer: Union[type, "torch.optim.Optimizer", None] = None, optimizer_params: Optional[dict] = None, channels_first: bool = True, clip_values: Optional["CLIP_VALUES_TYPE"] = None, @@ -215,24 +239,26 @@ def __init__( import timm timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer - - if type(model) is str: + if isinstance(model, str): model = timm.create_model(model, pretrained=load_pretrained) model.head = torch.nn.Linear(model.head.in_features, nb_classes) - if optimizer is not None: + if isinstance(optimizer, type): if optimizer_params is not None: optimizer = optimizer(model.parameters(), **optimizer_params) else: raise ValueError("If providing an optimiser please also supply its parameters") - else: + elif isinstance(model, VisionTransformer): pretrained_cfg = model.pretrained_cfg supplied_state_dict = model.state_dict() - model = timm.create_model(pretrained_cfg['vit_small_patch16_224'], pretrained=load_pretrained) - model.load_state_dict(torch.load(supplied_state_dict)) + model = timm.create_model(pretrained_cfg["architecture"], pretrained=load_pretrained) + model.load_state_dict(supplied_state_dict) model.head = torch.nn.Linear(model.head.in_features, nb_classes) if optimizer is not None: + if not isinstance(optimizer, torch.optim.Optimizer): + raise ValueError("Optimizer error: must be a 
torch.optim.Optimizer instance") + converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] opt_state_dict = optimizer.state_dict() if isinstance(optimizer, torch.optim.Adam): @@ -246,36 +272,46 @@ def __init__( converted_optimizer.load_state_dict(opt_state_dict) self.to_reshape = False - if model.default_cfg['input_size'] != input_shape: - print(f"ViT expects input shape of {model.default_cfg['input_size']}, " - f"but {input_shape} specified as the input shape. " - f"The input will be rescaled to {model.default_cfg['input_size']}") - self.to_reshape = True - - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ) + if isinstance(model, ArtViT): + if model.default_cfg["input_size"] != input_shape: + print( + f"ViT expects input shape of {model.default_cfg['input_size']}, " + f"but {input_shape} specified as the input shape. " + f"The input will be rescaled to {model.default_cfg['input_size']}" + ) + self.to_reshape = True + else: + raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") + + if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ) + else: + raise ValueError("opt error") self.ablation_type = ablation_type - self.ablation_size = ablation_size, + self.ablation_size = (ablation_size,) self.threshold = threshold print(self.model) - self.ablator = ColumnAblator(ablation_size=ablation_size, - channels_first=True, - to_reshape=self.to_reshape, - original_shape=input_shape, - output_shape=model.default_cfg['input_size']) + self.ablator = ColumnAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=model.default_cfg["input_size"], + ) @staticmethod def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: @@ -289,8 +325,11 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar from timm.models._builder import build_model_with_cfg from timm.models.vision_transformer import checkpoint_filter_fn + return build_model_with_cfg( - ArtViT, variant, pretrained, + ArtViT, + variant, + pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) @@ -309,26 +348,25 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - ind = np.arange(len(x)) num_batch = int(len(x) / float(batch_size)) - print('updating batchnorm') with torch.no_grad(): for _ in tqdm(range(nb_epochs)): for m in tqdm(range(num_batch)): - i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size: (m + 1) * batch_size]])).to(device) + i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(device) i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) _ = self.model(i_batch) def fit( # pylint: disable=W0221 - self, - x: np.ndarray, - y: np.ndarray, - batch_size: int = 128, - nb_epochs: int = 10, - training_mode: bool = 
True, - drop_last: bool = False, - scheduler: Optional[Any] = None, - update_batchnorm: bool = True, - verbose: bool = True, - **kwargs, + self, + x: np.ndarray, + y: np.ndarray, + batch_size: int = 128, + nb_epochs: int = 10, + training_mode: bool = True, + drop_last: bool = False, + scheduler: Optional[Any] = None, + update_batchnorm: bool = True, + verbose: bool = True, + **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. @@ -342,8 +380,9 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param update_batchnorm: ... - :param verbose: ... + :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior + to training. Useful on small datasets when using pre-trained ViTs. + :param verbose: if to display training progress bars :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ @@ -358,11 +397,7 @@ def fit( # pylint: disable=W0221 import torchvision.transforms as transforms y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - transform = transforms.Compose( - [ - transforms.RandomHorizontalFlip() - ] - ) + transform = transforms.Compose([transforms.RandomHorizontalFlip()]) # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) @@ -391,12 +426,13 @@ def fit( # pylint: disable=W0221 # Train for one epoch for m in pbar: - i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size: (m + 1) * batch_size]])).to(self._device) + i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])).to( + self._device + ) i_batch = transform(i_batch) i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size: (m + 1) * batch_size]]).to( - self._device) + o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) # Zero the parameter gradients self._optimizer.zero_grad() @@ -432,8 +468,7 @@ def fit( # pylint: disable=W0221 if verbose: pbar.set_description( - f"Loss {torch.mean(torch.stack(epoch_loss)):.2f}" - f" Acc {np.mean(epoch_acc):.2f}" + f"Loss {torch.mean(torch.stack(epoch_loss)):.2f}" f" Acc {np.mean(epoch_acc):.2f}" ) if scheduler is not None: @@ -468,12 +503,13 @@ def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): cert_acc = [] with torch.no_grad(): for m in pbar: - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size: (m + 1) * batch_size])).to(self._device) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size: (m + 1) * batch_size]).to(self._device) + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( + self._device + ) + o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) predictions = [] pred_counts = torch.zeros((batch_size, self.nb_classes)).to(self._device) for pos in range(i_batch.shape[-1]): - ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction @@ -481,13 +517,15 @@ def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): pred_counts[np.arange(0, batch_size), model_outputs.argmax(dim=-1)] += 1 
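+                        # Each ablation position casts one vote: the per-class counts in pred_counts are later
+                        # compared by ColumnAblator.certify, which requires the top class to beat the runner-up
+                        # by more than 2 * (size_to_certify + ablation_size - 1) votes for a certified prediction.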
predictions.append(model_outputs) - cert, cert_and_correct, top_predicted_class = self.ablator.certify(pred_counts, size_to_certify=4, label=o_batch) + cert, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=4, label=o_batch + ) cert_acc.append(torch.sum(cert_and_correct) / batch_size) acc = torch.sum(top_predicted_class == o_batch) / batch_size accuracy.append(acc) - print('Normal Acc: ', torch.mean(torch.stack(accuracy))) - print('Cert Normal Acc: ', torch.mean(torch.stack(cert_acc))) + print("Normal Acc: ", torch.mean(torch.stack(accuracy))) + print("Cert Normal Acc: ", torch.mean(torch.stack(cert_acc))) @staticmethod def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index 1c15cc1e7f..1923e64455 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -1,3 +1,20 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. """ This module implements Certified Patch Robustness via Smoothed Vision Transformers @@ -8,7 +25,7 @@ """ import torch -from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING +from typing import Optional, Tuple device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -18,6 +35,7 @@ class UpSampler(torch.nn.Module): Resizes datasets to the specified size. Usually for upscaling datasets like CIFAR to Imagenet format """ + def __init__(self, input_size: int, final_size: int) -> None: """ Creates an upsampler to make the supplied data match the pre-trained ViT format @@ -25,7 +43,7 @@ def __init__(self, input_size: int, final_size: int) -> None: :param final_size: Desired final size """ super(UpSampler, self).__init__() - self.upsample = torch.nn.Upsample(scale_factor=final_size/input_size) + self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) def forward(self, x: torch.Tensor) -> torch.Tensor: """ @@ -41,9 +59,15 @@ class ColumnAblator(torch.nn.Module): """ Pure Pytorch implementation of stripe/column ablation. 
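+    The ablated input keeps only a column of width `ablation_size` (wrapping around the image edge when
+    needed), zeroes out all other pixels, and an extra channel is appended marking which pixels were retained.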
""" - def __init__(self, ablation_size: int, channels_first: bool, - to_reshape: bool = False, original_shape: Optional[Tuple] = None, - output_shape: Optional[Tuple] = None): + + def __init__( + self, + ablation_size: int, + channels_first: bool, + to_reshape: bool = False, + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + ): """ Creates a column ablator @@ -57,7 +81,8 @@ def __init__(self, ablation_size: int, channels_first: bool, self.ablation_size = ablation_size self.channels_first = channels_first self.to_reshape = to_reshape - self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + if original_shape is not None and output_shape is not None: + self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: """ @@ -69,10 +94,10 @@ def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: """ k = self.ablation_size if column_pos + k > x.shape[-1]: - x[:, :, :, (column_pos + k) % x.shape[-1]:column_pos] = 0.0 + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 else: x[:, :, :, :column_pos] = 0.0 - x[:, :, :, column_pos + k:] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 return x def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: @@ -91,7 +116,9 @@ def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: x = self.upsample(x) return x - def certify(self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + def certify( + self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions From c83e8239733336aa1890d00d597572c9d68d945d Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 22 May 2023 16:06:54 +0000 Subject: [PATCH 10/55] initial notebook Signed-off-by: GiulioZizzo --- notebooks/smoothed_vision_transformers.ipynb | 118 +++++++++++++++++++ 1 file changed, 118 insertions(+) create mode 100644 notebooks/smoothed_vision_transformers.ipynb diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb new file mode 100644 index 0000000000..a1f4eea984 --- /dev/null +++ b/notebooks/smoothed_vision_transformers.ipynb @@ -0,0 +1,118 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "58063edd", + "metadata": {}, + "source": [ + "# Certification of Vision Transformers" + ] + }, + { + "cell_type": "markdown", + "id": "0438abb9", + "metadata": {}, + "source": [ + "In this notebook we will go over how to use the PyTorchSmoothedViT tool and be able to certify vision transformers against patch attacks!\n", + "\n", + "### Overview\n", + "\n", + "This was introduced in Certified Patch Robustness via Smoothed Vision Transformers (https://arxiv.org/abs/2110.07719). The core technique is one of *image ablations*, where the image is blanked out except for certain regions. By ablating the input in different ways every time we can obtain many predicitons for a single input. Now, as we are ablating large parts of the image the attacker's patch attack is also getting removed in many predictions. Based on factors like the size of the adversarial patch and the size of the retained part of the image the attacker will only be able to influence a limited number of predictions. 
In fact, if the attacker has an m x m patch attack and the retained part of the image is a column of width s, then the maximum number of predictions that can be affected is: \n",
+    "\n",
+    "$$\n",
+    "m + s - 1\n",
+    "$$\n",
+    "\n",
+    "Based on this relationship we can derive a simple but effective criterion: if we are making many predictions for an image, and the highest predicted class $c_t$ has been predicted $k$ times while the second most predicted class $c_{t-1}$ has been predicted $k_{t-1}$ times, then we have a certified prediction if: \n",
+    "\n",
+    "$$\n",
+    "k - k_{t-1} > 2(m + s - 1)\n",
+    "$$\n",
+    "\n",
+    "Intuitively, we are saying that even if every prediction the attacker could influence (at most $m + s - 1$ of them) were flipped to the runner-up class, the model would *still* have predicted class $c_t$. For example, for a 32 pixel wide image with an ablation size of $s = 4$ and a 4 x 4 patch, at most $4 + 4 - 1 = 7$ of the 32 column ablations can be affected, so the top class must beat the runner-up by more than 14 votes.\n",
+    "\n",
+    "### What's special about Vision Transformers?\n",
+    "\n",
+    "The formulation above is very generic and it can be applied to any neural network model; in fact, the original paper which proposed it () considered the case of convolutional neural networks. \n",
+    "\n",
+    "However, Vision Transformers (or ViTs) are well suited to this task of predicting with vision ablations for two key reasons: \n",
+    "\n",
+    "+ ViTs first tokenize the input into a series of image regions which get embedded and then processed through the neural network. Thus, by considering the input as a set of tokens we can drop tokens which correspond to fully masked (i.e. ablated) regions, significantly saving on compute costs. \n",
+    "\n",
+    "+ Secondly, the ViT's self attention layer enables sharing of information globally at every layer. In contrast, convolutional neural networks build up the receptive field over a series of layers. Hence, ViTs can be more effective at classifying an image based on its small unablated regions.\n",
+    "\n",
+    "Let's see how to use these tools!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "aeb27667",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The core tool is PyTorchSmoothedViT which can be imported as follows:\n",
+    "from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "353ef5a6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# There are a few ways we can interface with it. \n",
+    "# The most direct way to get setup is by specifying the name of a supported transformer.\n",
+    "# Behind the scenes we are using the timm library (link: ) so any ViT supported by that libary will work.\n",
+    "\n",
+    "art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', # Name of the model acitecture to load\n",
+    " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n",
+    " optimizer=torch.optim.SGD, # the optimizer to use: note! this is not initialised here we just supply the class!\n",
+    " optimizer_params={\"lr\": 0.01}, # the parameters to use\n",
+    " input_shape=(3, 32, 32), # the input shape of the data: Note! ...\n",
+    " nb_classes=10,\n",
+    " ablation_size=4,\n",
+    " load_pretrained=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c7a4255f",
+   "metadata": {},
+   "source": [
+    "Creating a PyTorchSmoothedViT instance with the above code follows many of the general ART patterns with two caveats: \n",
+    "+ The optimizer would (normally) be supplied initialised into the estimator along with a pytorch model. However, here we have not yet created the model, we are just supplying the model architecture name.
Hence, here we pass the class into PyTorchSmoothedViT with the keyword arguments in optimizer_params which you would normally use to initialise it.\n", + "+ The input shape..." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7253ce1", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a502a8224b098f81340f798ddbdabf6f701987b3 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 24 May 2023 20:02:10 +0100 Subject: [PATCH 11/55] adding testing and updated input args Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/__init__.py | 2 +- .../smoothed_vision_transformers/pytorch.py | 127 ++++++++----- .../smooth_vit.py | 12 +- dev.py | 17 +- notebooks/smoothed_vision_transformers.ipynb | 90 ++++++++- .../certification/test_smooth_vit.py | 174 ++++++++++++++++++ 6 files changed, 358 insertions(+), 64 deletions(-) create mode 100644 tests/estimators/certification/test_smooth_vit.py diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers/__init__.py index fc9a45bb4f..fd2b959474 100644 --- a/art/estimators/certification/smoothed_vision_transformers/__init__.py +++ b/art/estimators/certification/smoothed_vision_transformers/__init__.py @@ -1 +1 @@ -from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT, ArtViT \ No newline at end of file +from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT \ No newline at end of file diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 173399d1ba..c960001c8e 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -26,7 +26,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -import sys from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random import torch @@ -41,6 +40,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: + import torchvision from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor @@ -80,13 +80,13 @@ class PatchEmbed(torch.nn.Module): """ - def __init__(self, patch_size: int=16, in_channels: int=1, embed_dim:int=768): + def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): """ Specifies the configuration for the convolutional layer. - :param patch_size: The patch size used by the ViT - :param in_channels: Number of input channels. - :param embed_dim: The embedding dimension used by the ViT + :param patch_size: The patch size used by the ViT. + :param in_channels: Number of input channels. + :param embed_dim: The embedding dimension used by the ViT. 
""" super().__init__() self.patch_size = patch_size @@ -94,13 +94,14 @@ def __init__(self, patch_size: int=16, in_channels: int=1, embed_dim:int=768): self.embed_dim = embed_dim self.proj: Optional[torch.nn.Conv2d] = None - def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: + def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: # pylint: disable=W0613 """ Creates a convolution that mimics the embedding layer to be used for the ablation mask to track where the image was ablated. :param patch_size: The patch size used by the ViT :param embed_dim: The embedding dimension used by the ViT + :param kwargs: Handles the remaining kwargs from the ViT configuration. """ if patch_size is not None: @@ -135,16 +136,28 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class ArtViT(VisionTransformer): + """ + Art class inheriting from VisionTransformer to control the forward pass of the ViT. + """ # Make as a class attribute to avoid being included in the # state dictionaries of the ViT Model. ablation_mask_embedder = PatchEmbed(in_channels=1) def __init__(self, **kwargs): + """ + Create a ArtViT instance + :param **kwargs: keyword arguments required to create the mask embedder. + """ + self.to_drop_tokens = kwargs['drop_tokens'] + del kwargs['drop_tokens'] super().__init__(**kwargs) self.ablation_mask_embedder.create(**kwargs) + self.in_chans = kwargs['in_chans'] + self.img_size = kwargs['img_size'] + @staticmethod - def drop_tokens(x: torch.Tensor, indexes) -> torch.Tensor: + def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: """ Drops the tokens which correspond to fully masked inputs @@ -158,8 +171,7 @@ def drop_tokens(x: torch.Tensor, indexes) -> torch.Tensor: # reshape to temporarily remove batch x_no_cl = torch.reshape(x_no_cl, shape=(-1, shape[-1])) indexes = torch.reshape(indexes, shape=(-1,)) - indexes = (indexes == True).nonzero(as_tuple=True)[0] - + indexes = indexes.nonzero(as_tuple=True)[0] x_no_cl = torch.index_select(x_no_cl, dim=0, index=indexes) x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) return torch.cat((cls_token, x_no_cl), dim=1) @@ -167,20 +179,21 @@ def drop_tokens(x: torch.Tensor, indexes) -> torch.Tensor: def forward_features(self, x: torch.Tensor) -> torch.Tensor: """ The forward pass of the ViT. - #TODO! check for 1 channel inputs! - :param x: Input data. 
- + :return: The input processed by the ViT backbone """ - drop_tokens = True - if x.shape[1] == 4: - x, ablation_mask = x[:, :3], x[:, 3:4] + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True + + if ablated_input: + x, ablation_mask = x[:, :self.in_chans], x[:, self.in_chans:self.in_chans + 1] x = self.patch_embed(x) x = self._pos_embed(x) - if drop_tokens: + if self.to_drop_tokens and ablated_input: ones = self.ablation_mask_embedder(ablation_mask) to_drop = torch.sum(ones, dim=2) indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) @@ -191,15 +204,24 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: class PyTorchSmoothedViT(PyTorchClassifier): + """ + Implementation of Certified Patch Robustness via Smoothed Vision Transformers + + | Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + + | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf + """ def __init__( self, model: Union[VisionTransformer, str], loss: "torch.nn.modules.loss._Loss", input_shape: Tuple[int, ...], nb_classes: int, - ablation_type: str, ablation_size: int, - threshold: float, + replace_last_layer: bool, + drop_tokens: bool = True, + load_pretrained: bool = True, optimizer: Union[type, "torch.optim.Optimizer", None] = None, optimizer_params: Optional[dict] = None, channels_first: bool = True, @@ -208,7 +230,6 @@ def __init__( postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", - load_pretrained: bool = True, ): """ Create a smoothed ViT classifier. @@ -219,10 +240,13 @@ def __init__( categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. :param nb_classes: The number of classes of the model. - :param ablation_type: The type of ablation to perform, must be either "column" or "block" - :param ablation_size: The size of the data portion to retain after ablation. Will be a column of size N for - "column" ablation type or a NxN square for ablation of type "block" - :param threshold: The minimum threshold to count a prediction. + :param ablation_size: The size of the data portion to retain after ablation. + :param replace_last_layer: If to replace the last layer of the ViT with a fresh layer matching the number + of classes for the dataset to be examined. Needed if going from the pre-trained + imagenet models to fine-tune on a dataset like CIFAR. + :param drop_tokens: If to drop the fully ablated tokens in the ViT + :param load_pretrained: If to load a pretrained model matching the ViT name. Will only affect the ViT if a + string name is passed to model rather than a ViT directly. :param optimizer: The optimizer used to train the classifier. :param channels_first: Set channels first or last. 
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and @@ -240,8 +264,9 @@ def __init__( timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer if isinstance(model, str): - model = timm.create_model(model, pretrained=load_pretrained) - model.head = torch.nn.Linear(model.head.in_features, nb_classes) + model = timm.create_model(model, pretrained=load_pretrained, drop_tokens=drop_tokens) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) if isinstance(optimizer, type): if optimizer_params is not None: optimizer = optimizer(model.parameters(), **optimizer_params) @@ -251,9 +276,10 @@ def __init__( elif isinstance(model, VisionTransformer): pretrained_cfg = model.pretrained_cfg supplied_state_dict = model.state_dict() - model = timm.create_model(pretrained_cfg["architecture"], pretrained=load_pretrained) + model = timm.create_model(pretrained_cfg["architecture"]) model.load_state_dict(supplied_state_dict) - model.head = torch.nn.Linear(model.head.in_features, nb_classes) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) if optimizer is not None: if not isinstance(optimizer, torch.optim.Optimizer): @@ -273,6 +299,11 @@ def __init__( self.to_reshape = False if isinstance(model, ArtViT): + + if model.default_cfg['input_size'][0] != input_shape[0]: + raise ValueError(f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f' but {input_shape[0]} channels were provided.') + if model.default_cfg["input_size"] != input_shape: print( f"ViT expects input shape of {model.default_cfg['input_size']}, " @@ -298,11 +329,9 @@ def __init__( device_type=device_type, ) else: - raise ValueError("opt error") + raise ValueError("Error occurred in optimizer creation") - self.ablation_type = ablation_type self.ablation_size = (ablation_size,) - self.threshold = threshold print(self.model) self.ablator = ColumnAblator( @@ -341,7 +370,6 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - :param batch_size: Size of batches. :param nb_epochs: How many times to forward pass over the input data """ - import random self.model.train() @@ -365,6 +393,7 @@ def fit( # pylint: disable=W0221 drop_last: bool = False, scheduler: Optional[Any] = None, update_batchnorm: bool = True, + transform: Optional["torchvision.transforms.transforms.Compose"] = None, verbose: bool = True, **kwargs, ) -> None: @@ -383,10 +412,10 @@ def fit( # pylint: disable=W0221 :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior to training. Useful on small datasets when using pre-trained ViTs. :param verbose: if to display training progress bars + :param transform: :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. 
""" - import torch # Set model mode self._model.train(mode=training_mode) @@ -394,10 +423,7 @@ def fit( # pylint: disable=W0221 if self._optimizer is None: # pragma: no cover raise ValueError("An optimizer is needed to train the model, but none for provided.") - import torchvision.transforms as transforms - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - transform = transforms.Compose([transforms.RandomHorizontalFlip()]) # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) @@ -429,7 +455,8 @@ def fit( # pylint: disable=W0221 i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])).to( self._device ) - i_batch = transform(i_batch) + if transform is not None: + i_batch = transform(i_batch) i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) @@ -447,8 +474,7 @@ def fit( # pylint: disable=W0221 "method PyTorchClassifier.fit." ) raise err - # Form the loss function - # print('the model outputs are ', model_outputs.shape) + loss = self.loss(model_outputs, o_batch) acc = self.get_accuracy(preds=model_outputs, labels=o_batch) epoch_acc.append(acc) @@ -476,10 +502,11 @@ def fit( # pylint: disable=W0221 def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): """ - Evaluates the ViT's normal and certified performance over the supplied data - :param x: Evaluation data - :param y: Evaluation labels - :param batch_size: batch size when evaluating + Evaluates the ViT's normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param batch_size: batch size when evaluating. """ self.model.eval() @@ -517,24 +544,26 @@ def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): pred_counts[np.arange(0, batch_size), model_outputs.argmax(dim=-1)] += 1 predictions.append(model_outputs) - cert, cert_and_correct, top_predicted_class = self.ablator.certify( + _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=4, label=o_batch ) cert_acc.append(torch.sum(cert_and_correct) / batch_size) acc = torch.sum(top_predicted_class == o_batch) / batch_size accuracy.append(acc) - print("Normal Acc: ", torch.mean(torch.stack(accuracy))) - print("Cert Normal Acc: ", torch.mean(torch.stack(cert_acc))) + pbar.set_description( + f"Normal Acc {torch.mean(torch.stack(accuracy)):.2f} " + f"Cert Acc {torch.mean(torch.stack(cert_acc)):.2f}" + ) @staticmethod def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: """ - Helper function to print out the accuracy during training + Helper function to print out the accuracy during training. - :param preds: (concrete) model predictions - :param labels: ground truth labels (not one hot) - :return: prediction accuracy + :param preds: (concrete) model predictions. + :param labels: ground truth labels (not one hot). + :return: prediction accuracy. 
""" if isinstance(preds, torch.Tensor): preds = preds.detach().cpu().numpy() diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index 1923e64455..838d0bfa8b 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -23,10 +23,11 @@ | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ -import torch from typing import Optional, Tuple +import torch + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -42,7 +43,7 @@ def __init__(self, input_size: int, final_size: int) -> None: :param input_size: Size of the current input data :param final_size: Desired final size """ - super(UpSampler, self).__init__() + super().__init__() self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) def forward(self, x: torch.Tensor) -> torch.Tensor: @@ -125,14 +126,15 @@ def certify( :param pred_counts: The model predictions over the ablated data. :param size_to_certify: The patch size we wish to check certification against :param label: The ground truth labels - :return: A tuple consisting of: the certified predictions, the predictions which were certified and also correct, - and the most predicted class across the different ablations on the input + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. """ num_of_classes = pred_counts.shape[-1] top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) - second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) + second_class_counts, _ = pred_counts.kthvalue(num_of_classes - 1, dim=1) cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) diff --git a/dev.py b/dev.py index 28c828a5c0..2894682d7c 100644 --- a/dev.py +++ b/dev.py @@ -1,9 +1,11 @@ import torch import ssl ssl._create_default_https_context = ssl._create_unverified_context -from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT, ArtViT +from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT import numpy as np from torchvision import datasets +from torchvision import transforms + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -39,13 +41,18 @@ def get_cifar_data(): optimizer_params={"lr": 0.01}, input_shape=(3, 32, 32), nb_classes=10, - ablation_type='column', ablation_size=4, - threshold=0.01, - load_pretrained=True) + replace_last_layer=True, + load_pretrained=True, + ) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -# art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler) +art_model.fit(x_train, y_train, + nb_epochs=30, + update_batchnorm=True, + scheduler=scheduler, + transform=transforms.Compose([transforms.RandomHorizontalFlip()])) + # torch.save(art_model.model.state_dict(), 'trained.pt') # art_model.model.load_state_dict(torch.load('trained.pt')) art_model.eval_and_certify(x_train, y_train) diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb index a1f4eea984..01cbb6829e 100644 --- a/notebooks/smoothed_vision_transformers.ipynb +++ 
b/notebooks/smoothed_vision_transformers.ipynb @@ -54,6 +54,40 @@ "from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "80541a3a", + "metadata": {}, + "outputs": [], + "source": [ + "# Function to fetch the cifar-10 data\n", + "def get_cifar_data():\n", + " \"\"\"\n", + " Get CIFAR-10 data.\n", + " :return: cifar train/test data.\n", + " \"\"\"\n", + " train_set = datasets.CIFAR10('./data', train=True, download=True)\n", + " test_set = datasets.CIFAR10('./data', train=False, download=True)\n", + "\n", + " x_train = train_set.data.astype(np.float32)\n", + " y_train = np.asarray(train_set.targets)\n", + "\n", + " x_test = test_set.data.astype(np.float32)\n", + " y_test = np.asarray(test_set.targets)\n", + "\n", + " x_train = np.moveaxis(x_train, [3], [1])\n", + " x_test = np.moveaxis(x_test, [3], [1])\n", + "\n", + " x_train = x_train / 255.0\n", + " x_test = x_test / 255.0\n", + "\n", + " return (x_train, y_train), (x_test, y_test)\n", + "\n", + "\n", + "(x_train, y_train), (x_test, y_test) = get_cifar_data()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -61,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "# There are a few ways we can interface with it. \n", + "# There are a few ways we can interface with PyTorchSmoothedViT. \n", "# The most direct way to get setup is by specifying the name of a supported transformer.\n", "# Behind the scenes we are using the timm library (link: ) so any ViT supported by that libary will work.\n", "\n", @@ -72,7 +106,7 @@ " input_shape=(3, 32, 32), # the input shape of the data: Note! ...\n", " nb_classes=10,\n", " ablation_size=4,\n", - " load_pretrained=True)" + " load_pretrained=True) # if to load pre-trained weights for the ViT" ] }, { @@ -82,7 +116,20 @@ "source": [ "Creating a PyTorchSmoothedViT instance with the above code follows many of the general ART patterns with two caveats: \n", "+ The optimizer would (normally) be supplied initialised into the estimator along with a pytorch model. However, here we have not yet created the model, we are just supplying the model architecture name. Hence, here we pass the class into PyTorchSmoothedViT with the keyword arguments in optimizer_params which you would normally use to initialise it.\n", - "+ The input shape..." + "+ The input shape will primiarily determine if the input requires upsampling. The ViT model such as the one loaded is for images of 224 x 224 resolution, thus in our case of using CIFAR data, we will be upsampling it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44975815", + "metadata": {}, + "outputs": [], + "source": [ + "# We can see behind the scenes how PyTorchSmoothedViT processes input by passing in the first few CIFAR\n", + "# images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", + "ablated = art_model.ablator.forward(x_train[0:10], column_pos=6)\n", + "ablated = ablated.cpu().detach().numpy()\n" ] }, { @@ -91,6 +138,41 @@ "id": "e7253ce1", "metadata": {}, "outputs": [], + "source": [ + "# We can now train the model \n", + "\n", + "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1)\n", + "art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler)\n", + "torch.save(art_model.model.state_dict(), 'trained.pt')\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "046b8168", + "metadata": {}, + "outputs": [], + "source": [ + "# Perform certification\n", + "art_model.model.load_state_dict(torch.load('trained.pt'))\n", + "art_model.eval_and_certify(x_test, y_test)" + ] + }, + { + "cell_type": "markdown", + "id": "128ce03a", + "metadata": {}, + "source": [ + "We can also setup the PyTorchSmoothedViT if we start with a ViT model directly.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f41e078", + "metadata": {}, + "outputs": [], "source": [] } ], @@ -110,7 +192,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.14" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py new file mode 100644 index 0000000000..d6125f33dd --- /dev/null +++ b/tests/estimators/certification/test_smooth_vit.py @@ -0,0 +1,174 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import pytest + +import numpy as np + +from art.utils import load_dataset +from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT +from tests.utils import ARTTestException + + +@pytest.fixture() +def fix_get_mnist_data(): + """ + Get the first 128 samples of the mnist test set with channels first format + + :return: First 128 sample/label pairs of the MNIST test dataset. 
+ """ + nb_test = 128 + + (_, _), (x_test, y_test), _, _ = load_dataset("mnist") + x_test = np.squeeze(x_test).astype(np.float32) + x_test = np.expand_dims(x_test, axis=1) + y_test = np.argmax(y_test, axis=1) + + x_test, y_test = x_test[:nb_test], y_test[:nb_test] + return x_test, y_test + + +@pytest.fixture() +def fix_get_cifar10_data(): + """ + Get the first 128 samples of the cifar10 test set + + :return: First 128 sample/label pairs of the cifar10 test dataset. + """ + nb_test = 128 + + (_, _), (x_test, y_test), _, _ = load_dataset("cifar10") + y_test = np.argmax(y_test, axis=1) + x_test, y_test = x_test[:nb_test], y_test[:nb_test] + x_test = np.transpose(x_test, (0, 3, 1, 2)) # return in channels first format + return x_test.astype(np.float32), y_test + + +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the ablation is being performed correctly + """ + from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + import torch + try: + cifar_data = fix_get_cifar10_data[0] + cifar_labels = fix_get_cifar10_data[1] + + col_ablator = ColumnAblator(ablation_size=4, + channels_first=True, + to_reshape=False, # do not upsample initially + original_shape=(3, 32, 32), + output_shape=(3, 224, 224)) + + cifar_data = torch.from_numpy(cifar_data) + # check that the ablation functioned when in the middle of the image + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 0:10]) == 0 + assert torch.sum(ablated[:, :, :, 10:14]) > 0 + assert torch.sum(ablated[:, :, :, 14:]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 30:]) > 0 + assert torch.sum(ablated[:, :, :, 2:30]) == 0 + assert torch.sum(ablated[:, :, :, :2]) > 0 + + # check that upsampling works as expected + col_ablator = ColumnAblator(ablation_size=4, + channels_first=True, + to_reshape=True, + original_shape=(3, 32, 32), + output_shape=(3, 224, 224)) + + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, :10*7]) == 0 + assert torch.sum(ablated[:, :, :, 10*7:14*7]) > 0 + assert torch.sum(ablated[:, :, :, 14*7:]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 30*7:]) > 0 + assert torch.sum(ablated[:, :, :, 2*7:30*7]) == 0 + assert torch.sum(ablated[:, :, :, :2*7]) > 0 + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the training loop for pytorch does not result in errors + """ + import torch + try: + cifar_data = fix_get_cifar10_data[0][0:50] + cifar_labels = fix_get_cifar10_data[1][0:50] + + art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_type='column', + ablation_size=4, + threshold=0.01, + load_pretrained=True) + + scheduler = 
torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) + art_model.fit(cifar_data, cifar_labels, nb_epochs=2, update_batchnorm=True, scheduler=scheduler) + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the training loop for pytorch does not result in errors + """ + """ + Check that the ablation is being performed correctly + """ + from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + import torch + + try: + col_ablator = ColumnAblator(ablation_size=4, + channels_first=True, + to_reshape=True, # do not upsample initially + original_shape=(3, 32, 32), + output_shape=(3, 224, 224)) + pred_counts = torch.from_numpy(np.asarray([[20, 5, 1], [10, 5, 1], [1, 16, 1]])) + cert, cert_and_correct, top_predicted_class = col_ablator.certify(pred_counts=pred_counts, + size_to_certify=4, + label=0,) + assert torch.equal(cert, torch.tensor([True, False, True])) + assert torch.equal(cert_and_correct, torch.tensor([True, False, False])) + except ARTTestException as e: + art_warning(e) + From cdedd06d0aa089031cc70a6f52c5ef628c3300d4 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 26 May 2023 19:06:52 +0000 Subject: [PATCH 12/55] Adding checks to supplied models. New method to get supported models Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 101 +++++++++++++++--- .../certification/test_smooth_vit.py | 15 +-- 2 files changed, 93 insertions(+), 23 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index c960001c8e..c6b6090730 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -101,7 +101,7 @@ def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: # pylint: :param patch_size: The patch size used by the ViT :param embed_dim: The embedding dimension used by the ViT - :param kwargs: Handles the remaining kwargs from the ViT configuration. + :param kwargs: Handles the remaining kwargs from the ViT configuration. """ if patch_size is not None: @@ -146,7 +146,7 @@ class ArtViT(VisionTransformer): def __init__(self, **kwargs): """ Create a ArtViT instance - :param **kwargs: keyword arguments required to create the mask embedder. + :param kwargs: keyword arguments required to create the mask embedder. """ self.to_drop_tokens = kwargs['drop_tokens'] del kwargs['drop_tokens'] @@ -179,6 +179,7 @@ def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: def forward_features(self, x: torch.Tensor) -> torch.Tensor: """ The forward pass of the ViT. + :param x: Input data. :return: The input processed by the ViT backbone """ @@ -230,6 +231,7 @@ def __init__( postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", + verbose: bool = True, ): """ Create a smoothed ViT classifier. 
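+                       These are accepted so the full ViT configuration can be passed straight through, but they
+                       are not used when building the convolutional embedder for the ablation mask.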
@@ -276,7 +278,11 @@ def __init__( elif isinstance(model, VisionTransformer): pretrained_cfg = model.pretrained_cfg supplied_state_dict = model.state_dict() - model = timm.create_model(pretrained_cfg["architecture"]) + supported_models = self.get_models() + if pretrained_cfg["architecture"] not in supported_models: + raise ValueError('Architecture not supported. Use PyTorchSmoothedViT.get_models() ' + 'to get the supported model architectures.') + model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens) model.load_state_dict(supplied_state_dict) if replace_last_layer: model.head = torch.nn.Linear(model.head.in_features, nb_classes) @@ -298,21 +304,21 @@ def __init__( converted_optimizer.load_state_dict(opt_state_dict) self.to_reshape = False - if isinstance(model, ArtViT): + if not isinstance(model, ArtViT): + raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") - if model.default_cfg['input_size'][0] != input_shape[0]: - raise ValueError(f'ViT requires {model.default_cfg["input_size"][0]} channel input,' - f' but {input_shape[0]} channels were provided.') + if model.default_cfg['input_size'][0] != input_shape[0]: + raise ValueError(f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f' but {input_shape[0]} channels were provided.') - if model.default_cfg["input_size"] != input_shape: + if model.default_cfg["input_size"] != input_shape: + if verbose: print( f"ViT expects input shape of {model.default_cfg['input_size']}, " f"but {input_shape} specified as the input shape. " f"The input will be rescaled to {model.default_cfg['input_size']}" ) - self.to_reshape = True - else: - raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") + self.to_reshape = True if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): super().__init__( @@ -333,7 +339,9 @@ def __init__( self.ablation_size = (ablation_size,) - print(self.model) + if verbose: + print(self.model) + self.ablator = ColumnAblator( ablation_size=ablation_size, channels_first=True, @@ -342,6 +350,71 @@ def __init__( output_shape=model.default_cfg["input_size"], ) + @classmethod + def get_models(cls, generate_from_null: bool = False) -> List[str]: + """ + Return the supported model names to the user. + + :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. + Can be time-consuming. 
+ :return: A list of compatible models + """ + import timm + + supported_models = ['vit_base_patch8_224', + 'vit_base_patch16_18x2_224', 'vit_base_patch16_224', + 'vit_base_patch16_224_miil', 'vit_base_patch16_384', + 'vit_base_patch16_clip_224', 'vit_base_patch16_clip_384', + 'vit_base_patch16_gap_224', 'vit_base_patch16_plus_240', + 'vit_base_patch16_rpn_224', 'vit_base_patch16_xp_224', + 'vit_base_patch32_224', 'vit_base_patch32_384', + 'vit_base_patch32_clip_224', 'vit_base_patch32_clip_384', + 'vit_base_patch32_clip_448', 'vit_base_patch32_plus_256', + 'vit_giant_patch14_224', 'vit_giant_patch14_clip_224', + 'vit_gigantic_patch14_224', 'vit_gigantic_patch14_clip_224', + 'vit_huge_patch14_224', 'vit_huge_patch14_clip_224', + 'vit_huge_patch14_clip_336', 'vit_huge_patch14_xp_224', + 'vit_large_patch14_224', 'vit_large_patch14_clip_224', + 'vit_large_patch14_clip_336', 'vit_large_patch14_xp_224', + 'vit_large_patch16_224', 'vit_large_patch16_384', 'vit_large_patch32_224', + 'vit_large_patch32_384', 'vit_medium_patch16_gap_240', + 'vit_medium_patch16_gap_256', 'vit_medium_patch16_gap_384', + 'vit_small_patch16_18x2_224', 'vit_small_patch16_36x1_224', + 'vit_small_patch16_224', 'vit_small_patch16_384', + 'vit_small_patch32_224', 'vit_small_patch32_384', + 'vit_tiny_patch16_224', 'vit_tiny_patch16_384'] + + if not generate_from_null: + return supported_models + + supported = [] + unsupported = [] + + models = timm.list_models('vit_*') + for model in models: + print(f'Testing {model} creation') + try: + _ = PyTorchSmoothedViT(model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False) + supported.append(model) + except Exception: + unsupported.append(model) + + if supported != supported_models: + print('Difference between the generated and fixed model list. Although not necessarily ' + 'an error, this may point to the timm library being updated.') + + return supported + + @staticmethod def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: """ @@ -500,13 +573,14 @@ def fit( # pylint: disable=W0221 if scheduler is not None: scheduler.step() - def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): + def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128) -> Tuple["torch.Tensor", "torch.Tensor"]: """ Evaluates the ViT's normal and certified performance over the supplied data. :param x: Evaluation data. :param y: Evaluation labels. :param batch_size: batch size when evaluating. 
+ :return: The accuracy and certtified accuracy over the dataset """ self.model.eval() @@ -555,6 +629,7 @@ def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128): f"Normal Acc {torch.mean(torch.stack(accuracy)):.2f} " f"Cert Acc {torch.mean(torch.stack(cert_acc)):.2f}" ) + return torch.mean(torch.stack(accuracy)), torch.mean(torch.stack(cert_acc)) @staticmethod def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index d6125f33dd..7a83d4e16c 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -125,8 +125,8 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) """ import torch try: - cifar_data = fix_get_cifar10_data[0][0:50] - cifar_labels = fix_get_cifar10_data[1][0:50] + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', loss=torch.nn.CrossEntropyLoss(), @@ -134,10 +134,9 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) optimizer_params={"lr": 0.01}, input_shape=(3, 32, 32), nb_classes=10, - ablation_type='column', ablation_size=4, - threshold=0.01, - load_pretrained=True) + load_pretrained=True, + replace_last_layer=True) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) art_model.fit(cifar_data, cifar_labels, nb_epochs=2, update_batchnorm=True, scheduler=scheduler) @@ -149,10 +148,7 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ - Check that the training loop for pytorch does not result in errors - """ - """ - Check that the ablation is being performed correctly + Check that ... 
""" from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator import torch @@ -171,4 +167,3 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 assert torch.equal(cert_and_correct, torch.tensor([True, False, False])) except ARTTestException as e: art_warning(e) - From 13ece2c2e9a93990f60df4cace221ef645611260 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sun, 28 May 2023 20:01:48 +0000 Subject: [PATCH 13/55] adding functionality to get supported models and re-setting the timm library methods after ViT creation Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 181 ++++++++++++------ .../smooth_vit.py | 12 +- 2 files changed, 128 insertions(+), 65 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index c6b6090730..96bc291951 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -46,7 +46,6 @@ from art.defences.postprocessor import Postprocessor logger = logging.getLogger(__name__) -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class PatchEmbed(torch.nn.Module): @@ -94,13 +93,14 @@ def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = self.embed_dim = embed_dim self.proj: Optional[torch.nn.Conv2d] = None - def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: # pylint: disable=W0613 + def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: # pylint: disable=W0613 """ Creates a convolution that mimics the embedding layer to be used for the ablation mask to track where the image was ablated. :param patch_size: The patch size used by the ViT :param embed_dim: The embedding dimension used by the ViT + :param device: Which device to set the emdedding layer to. :param kwargs: Handles the remaining kwargs from the ViT configuration. """ @@ -116,7 +116,6 @@ def create(self, patch_size=None, embed_dim=None, **kwargs) -> None: # pylint: stride=self.patch_size, bias=False, ) - w_shape = self.proj.weight.shape self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) @@ -139,6 +138,7 @@ class ArtViT(VisionTransformer): """ Art class inheriting from VisionTransformer to control the forward pass of the ViT. """ + # Make as a class attribute to avoid being included in the # state dictionaries of the ViT Model. ablation_mask_embedder = PatchEmbed(in_channels=1) @@ -148,20 +148,29 @@ def __init__(self, **kwargs): Create a ArtViT instance :param kwargs: keyword arguments required to create the mask embedder. 
""" - self.to_drop_tokens = kwargs['drop_tokens'] - del kwargs['drop_tokens'] + self.to_drop_tokens = kwargs["drop_tokens"] + + if kwargs["device_type"] == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + del kwargs["drop_tokens"] + del kwargs["device_type"] + super().__init__(**kwargs) - self.ablation_mask_embedder.create(**kwargs) + self.ablation_mask_embedder.create(device=self.device, **kwargs) - self.in_chans = kwargs['in_chans'] - self.img_size = kwargs['img_size'] + self.in_chans = kwargs["in_chans"] + self.img_size = kwargs["img_size"] @staticmethod def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: """ Drops the tokens which correspond to fully masked inputs - :param x: Input data in .... format + :param x: Input data :param indexes: positions to be ablated :return: Input with tokens dropped where the input was fully ablated. """ @@ -189,7 +198,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: ablated_input = True if ablated_input: - x, ablation_mask = x[:, :self.in_chans], x[:, self.in_chans:self.in_chans + 1] + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] x = self.patch_embed(x) x = self._pos_embed(x) @@ -200,6 +209,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) x = self.drop_tokens(x, indexes) + x = self.norm_pre(x) x = self.blocks(x) return self.norm(x) @@ -213,6 +223,7 @@ class PyTorchSmoothedViT(PyTorchClassifier): | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ + def __init__( self, model: Union[VisionTransformer, str], @@ -264,9 +275,15 @@ def __init__( """ import timm + # temporarily assign the original method to tmp_func + tmp_func = timm.models.vision_transformer._create_vision_transformer + + # overrride with ART's ViT creation function timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer if isinstance(model, str): - model = timm.create_model(model, pretrained=load_pretrained, drop_tokens=drop_tokens) + model = timm.create_model( + model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type + ) if replace_last_layer: model.head = torch.nn.Linear(model.head.in_features, nb_classes) if isinstance(optimizer, type): @@ -280,9 +297,11 @@ def __init__( supplied_state_dict = model.state_dict() supported_models = self.get_models() if pretrained_cfg["architecture"] not in supported_models: - raise ValueError('Architecture not supported. Use PyTorchSmoothedViT.get_models() ' - 'to get the supported model architectures.') - model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens) + raise ValueError( + "Architecture not supported. Use PyTorchSmoothedViT.get_models() " + "to get the supported model architectures." + ) + model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type) model.load_state_dict(supplied_state_dict) if replace_last_layer: model.head = torch.nn.Linear(model.head.in_features, nb_classes) @@ -307,9 +326,11 @@ def __init__( if not isinstance(model, ArtViT): raise ValueError("Vision transformer is not of ArtViT. 
Error occurred in ArtViT creation.") - if model.default_cfg['input_size'][0] != input_shape[0]: - raise ValueError(f'ViT requires {model.default_cfg["input_size"][0]} channel input,' - f' but {input_shape[0]} channels were provided.') + if model.default_cfg["input_size"][0] != input_shape[0]: + raise ValueError( + f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f" but {input_shape[0]} channels were provided." + ) if model.default_cfg["input_size"] != input_shape: if verbose: @@ -348,8 +369,12 @@ def __init__( to_reshape=self.to_reshape, original_shape=input_shape, output_shape=model.default_cfg["input_size"], + device_type=device_type, ) + # set the method back to avoid unexpected side effects later on should timm need to be reused. + timm.models.vision_transformer._create_vision_transformer = tmp_func + @classmethod def get_models(cls, generate_from_null: bool = False) -> List[str]: """ @@ -361,28 +386,52 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: """ import timm - supported_models = ['vit_base_patch8_224', - 'vit_base_patch16_18x2_224', 'vit_base_patch16_224', - 'vit_base_patch16_224_miil', 'vit_base_patch16_384', - 'vit_base_patch16_clip_224', 'vit_base_patch16_clip_384', - 'vit_base_patch16_gap_224', 'vit_base_patch16_plus_240', - 'vit_base_patch16_rpn_224', 'vit_base_patch16_xp_224', - 'vit_base_patch32_224', 'vit_base_patch32_384', - 'vit_base_patch32_clip_224', 'vit_base_patch32_clip_384', - 'vit_base_patch32_clip_448', 'vit_base_patch32_plus_256', - 'vit_giant_patch14_224', 'vit_giant_patch14_clip_224', - 'vit_gigantic_patch14_224', 'vit_gigantic_patch14_clip_224', - 'vit_huge_patch14_224', 'vit_huge_patch14_clip_224', - 'vit_huge_patch14_clip_336', 'vit_huge_patch14_xp_224', - 'vit_large_patch14_224', 'vit_large_patch14_clip_224', - 'vit_large_patch14_clip_336', 'vit_large_patch14_xp_224', - 'vit_large_patch16_224', 'vit_large_patch16_384', 'vit_large_patch32_224', - 'vit_large_patch32_384', 'vit_medium_patch16_gap_240', - 'vit_medium_patch16_gap_256', 'vit_medium_patch16_gap_384', - 'vit_small_patch16_18x2_224', 'vit_small_patch16_36x1_224', - 'vit_small_patch16_224', 'vit_small_patch16_384', - 'vit_small_patch32_224', 'vit_small_patch32_384', - 'vit_tiny_patch16_224', 'vit_tiny_patch16_384'] + supported_models = [ + "vit_base_patch8_224", + "vit_base_patch16_18x2_224", + "vit_base_patch16_224", + "vit_base_patch16_224_miil", + "vit_base_patch16_384", + "vit_base_patch16_clip_224", + "vit_base_patch16_clip_384", + "vit_base_patch16_gap_224", + "vit_base_patch16_plus_240", + "vit_base_patch16_rpn_224", + "vit_base_patch16_xp_224", + "vit_base_patch32_224", + "vit_base_patch32_384", + "vit_base_patch32_clip_224", + "vit_base_patch32_clip_384", + "vit_base_patch32_clip_448", + "vit_base_patch32_plus_256", + "vit_giant_patch14_224", + "vit_giant_patch14_clip_224", + "vit_gigantic_patch14_224", + "vit_gigantic_patch14_clip_224", + "vit_huge_patch14_224", + "vit_huge_patch14_clip_224", + "vit_huge_patch14_clip_336", + "vit_huge_patch14_xp_224", + "vit_large_patch14_224", + "vit_large_patch14_clip_224", + "vit_large_patch14_clip_336", + "vit_large_patch14_xp_224", + "vit_large_patch16_224", + "vit_large_patch16_384", + "vit_large_patch32_224", + "vit_large_patch32_384", + "vit_medium_patch16_gap_240", + "vit_medium_patch16_gap_256", + "vit_medium_patch16_gap_384", + "vit_small_patch16_18x2_224", + "vit_small_patch16_36x1_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch32_384", + 
"vit_tiny_patch16_224", + "vit_tiny_patch16_384", + ] if not generate_from_null: return supported_models @@ -390,31 +439,34 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: supported = [] unsupported = [] - models = timm.list_models('vit_*') + models = timm.list_models("vit_*") for model in models: - print(f'Testing {model} creation') + print(f"Testing {model} creation") try: - _ = PyTorchSmoothedViT(model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=False, - replace_last_layer=True, - verbose=False) + _ = PyTorchSmoothedViT( + model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) supported.append(model) - except Exception: + except (TypeError, AttributeError): unsupported.append(model) if supported != supported_models: - print('Difference between the generated and fixed model list. Although not necessarily ' - 'an error, this may point to the timm library being updated.') + print( + "Difference between the generated and fixed model list. Although not necessarily " + "an error, this may point to the timm library being updated." + ) return supported - @staticmethod def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: """ @@ -452,7 +504,7 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - with torch.no_grad(): for _ in tqdm(range(nb_epochs)): for m in tqdm(range(num_batch)): - i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(device) + i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(self.device) i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) _ = self.model(i_batch) @@ -466,6 +518,7 @@ def fit( # pylint: disable=W0221 drop_last: bool = False, scheduler: Optional[Any] = None, update_batchnorm: bool = True, + batchnorm_update_epochs: int = 1, transform: Optional["torchvision.transforms.transforms.Compose"] = None, verbose: bool = True, **kwargs, @@ -484,8 +537,10 @@ def fit( # pylint: disable=W0221 :param scheduler: Learning rate scheduler to run at the start of every epoch. :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior to training. Useful on small datasets when using pre-trained ViTs. + :param batchnorm_update_epochs: how many times to forward pass over the training data + to pre-adjust the batchnorm statistics. + :param transform: Torchvision compose of relevant augmentation transformations to apply. :param verbose: if to display training progress bars - :param transform: :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. 
""" @@ -502,7 +557,7 @@ def fit( # pylint: disable=W0221 x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) if update_batchnorm: - self.update_batchnorm(x_preprocessed, batch_size) + self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) # Check label shape y_preprocessed = self.reduce_labels(y_preprocessed) @@ -573,14 +628,16 @@ def fit( # pylint: disable=W0221 if scheduler is not None: scheduler.step() - def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128) -> Tuple["torch.Tensor", "torch.Tensor"]: + def eval_and_certify( + self, x: np.ndarray, y: np.ndarray, batch_size: int = 128 + ) -> Tuple["torch.Tensor", "torch.Tensor"]: """ Evaluates the ViT's normal and certified performance over the supplied data. :param x: Evaluation data. :param y: Evaluation labels. :param batch_size: batch size when evaluating. - :return: The accuracy and certtified accuracy over the dataset + :return: The accuracy and certified accuracy over the dataset """ self.model.eval() @@ -626,7 +683,7 @@ def eval_and_certify(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128) accuracy.append(acc) pbar.set_description( - f"Normal Acc {torch.mean(torch.stack(accuracy)):.2f} " + f"Normal Acc {torch.mean(torch.stack(accuracy)):.2f} " f"Cert Acc {torch.mean(torch.stack(cert_acc)):.2f}" ) return torch.mean(torch.stack(accuracy)), torch.mean(torch.stack(cert_acc)) @@ -636,7 +693,7 @@ def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndar """ Helper function to print out the accuracy during training. - :param preds: (concrete) model predictions. + :param preds: model predictions. :param labels: ground truth labels (not one hot). :return: prediction accuracy. """ diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index 838d0bfa8b..f9a660199e 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -28,8 +28,6 @@ import torch -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - class UpSampler(torch.nn.Module): """ @@ -68,6 +66,7 @@ def __init__( to_reshape: bool = False, original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, + device_type: str = "gpu", ): """ Creates a column ablator @@ -82,6 +81,13 @@ def __init__( self.ablation_size = ablation_size self.channels_first = channels_first self.to_reshape = to_reshape + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + if original_shape is not None and output_shape is not None: self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) @@ -110,7 +116,7 @@ def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: :return: The albated input with an extra channel indicating the location of the ablation """ assert x.shape[1] == 3 - ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(device) + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) x = torch.cat([x, ones], dim=1) x = self.ablate(x, column_pos=column_pos) if self.to_reshape: From 280df6664dca07992bc0cd9d40575188c02bc218 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sun, 28 May 2023 20:05:55 +0000 Subject: [PATCH 14/55] adding additional tests and 
cuda fixes Signed-off-by: GiulioZizzo --- notebooks/smoothed_vision_transformers.ipynb | 882 +++++++++++++++++- .../certification/test_smooth_vit.py | 151 ++- 2 files changed, 986 insertions(+), 47 deletions(-) diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb index 01cbb6829e..22c896f161 100644 --- a/notebooks/smoothed_vision_transformers.ipynb +++ b/notebooks/smoothed_vision_transformers.ipynb @@ -17,24 +17,22 @@ "\n", "### Overview\n", "\n", - "This was introduced in Certified Patch Robustness via Smoothed Vision Transformers (https://arxiv.org/abs/2110.07719). The core technique is one of *image ablations*, where the image is blanked out except for certain regions. By ablating the input in different ways every time we can obtain many predicitons for a single input. Now, as we are ablating large parts of the image the attacker's patch attack is also getting removed in many predictions. Based on factors like the size of the adversarial patch and the size of the retained part of the image the attacker will only be able to influence a limited number of predictions. In fact, if the attacker has a m x m patch attack and the retained part of the image is a column of width s then the maximum number of predictions that could be affected are: \n", + "This method was introduced in Certified Patch Robustness via Smoothed Vision Transformers (https://arxiv.org/abs/2110.07719). The core technique is one of *image ablations*, where the image is blanked out except for certain regions. By ablating the input in different ways every time we can obtain many predicitons for a single input. Now, as we are ablating large parts of the image the attacker's patch attack is also getting removed in many predictions. Based on factors like the size of the adversarial patch and the size of the retained part of the image the attacker will only be able to influence a limited number of predictions. In fact, if the attacker has a $m x m$ patch attack and the retained part of the image is a column of width $s$ then the maximum number of predictions $\\Delta$ that could be affected are: \n", "\n", - "$\n", - "insert equation here\n", - "$\n", + "

$\\Delta = m + s - 1$

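As a concrete instance (the configuration used by the unit tests in this patch series): retaining a column of width $s = 4$ and certifying against a $4 x 4$ patch gives $\\Delta = 4 + 4 - 1 = 7$ ablation positions that the patch can influence.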
\n", "\n", - "Based on this relationship we can derive a simple but effective criterion that if we are making many predictions for an image and the highest predicted class $c_t$ has been predicted $k$ times and the second most predicted class $c_{t-1}$ has been predicted $k_{t-1}$ times then we have a certified prediction if: \n", + "Based on this relationship we can derive a simple but effective criterion that if we are making many predictions for an image and the highest predicted class $c_t$ has been predicted $k_t$ times and the second most predicted class $c_{t-1}$ has been predicted $k_{t-1}$ times then we have a certified prediction for $c_t$ if: \n", "\n", "\n", - "$insert here$\n", + "

$k_t - k_{t-1} > 2\\Delta$

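To make the criterion concrete, here is a small sketch (not part of the notebook) that reproduces the margin check on the prediction counts used by test_certification_function earlier in this patch series, with ablation_size = 4 and size_to_certify = 4:

import numpy as np

# Per-image counts of how often each class won across the ablation positions
# (three images, three classes), taken from the unit test.
pred_counts = np.array([[20, 5, 1], [10, 5, 1], [1, 16, 1]])
label = 0
ablation_size, size_to_certify = 4, 4

delta = size_to_certify + ablation_size - 1            # 7 ablations can overlap the patch
sorted_counts = np.sort(pred_counts, axis=1)
top, second = sorted_counts[:, -1], sorted_counts[:, -2]
cert = (top - second) > 2 * delta                      # [True, False, True]
cert_and_correct = cert & (np.argmax(pred_counts, axis=1) == label)  # [True, False, False]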
\n", "\n", "Intuitivly we are saying that even if $k$ predictions were adversarially influenced and those predictions were to change, then the model will *still* have predicted class $c_t$.\n", "\n", "### What's special about Vision Transformers?\n", "\n", - "The formulation above is very generic and it can be applied to any nerual network model, in fact the original paper which proposed it () considered the case with convolutional nerual networks. \n", + "The formulation above is very generic and it can be applied to any nerual network model, in fact the original paper which proposed it (https://arxiv.org/abs/2110.07719) considered the case with convolutional nerual networks. \n", "\n", - "However, Vision Transformers (or ViTs) are well siuted to this task of predicting with vision ablations for two key reasons: \n", + "However, Vision Transformers (ViTs) are well siuted to this task of predicting with vision ablations for two key reasons: \n", "\n", "+ ViTs first tokenize the input into a series of image regions which get embedded and then processed through the neural network. Thus, by considering the input as a set of tokens we can drop tokens which correspond to fully masked (i.e ablated)regions significantly saving on the compute costs. \n", "\n", @@ -45,21 +43,49 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "aeb27667", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], "source": [ + "import sys\n", + "import numpy as np\n", + "import torch\n", + "\n", + "sys.path.append(\"..\")\n", + "from torchvision import datasets\n", + "from matplotlib import pyplot as plt\n", + "\n", "# The core tool is PyTorchSmoothedViT which can be imported as follows:\n", - "from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT" + "from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT\n", + "\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "80541a3a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + } + ], "source": [ "# Function to fetch the cifar-10 data\n", "def get_cifar_data():\n", @@ -90,22 +116,744 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "353ef5a6", - "metadata": {}, - "outputs": [], + "execution_count": 3, + "id": "2ac0c5b3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['vit_base_patch8_224',\n", + " 'vit_base_patch16_18x2_224',\n", + " 'vit_base_patch16_224',\n", + " 'vit_base_patch16_224_miil',\n", + " 'vit_base_patch16_384',\n", + " 'vit_base_patch16_clip_224',\n", + " 'vit_base_patch16_clip_384',\n", + " 'vit_base_patch16_gap_224',\n", + " 'vit_base_patch16_plus_240',\n", + " 'vit_base_patch16_rpn_224',\n", + " 'vit_base_patch16_xp_224',\n", + " 'vit_base_patch32_224',\n", + " 'vit_base_patch32_384',\n", + " 'vit_base_patch32_clip_224',\n", + " 'vit_base_patch32_clip_384',\n", + " 'vit_base_patch32_clip_448',\n", + " 'vit_base_patch32_plus_256',\n", + " 'vit_giant_patch14_224',\n", 
+ " 'vit_giant_patch14_clip_224',\n", + " 'vit_gigantic_patch14_224',\n", + " 'vit_gigantic_patch14_clip_224',\n", + " 'vit_huge_patch14_224',\n", + " 'vit_huge_patch14_clip_224',\n", + " 'vit_huge_patch14_clip_336',\n", + " 'vit_huge_patch14_xp_224',\n", + " 'vit_large_patch14_224',\n", + " 'vit_large_patch14_clip_224',\n", + " 'vit_large_patch14_clip_336',\n", + " 'vit_large_patch14_xp_224',\n", + " 'vit_large_patch16_224',\n", + " 'vit_large_patch16_384',\n", + " 'vit_large_patch32_224',\n", + " 'vit_large_patch32_384',\n", + " 'vit_medium_patch16_gap_240',\n", + " 'vit_medium_patch16_gap_256',\n", + " 'vit_medium_patch16_gap_384',\n", + " 'vit_small_patch16_18x2_224',\n", + " 'vit_small_patch16_36x1_224',\n", + " 'vit_small_patch16_224',\n", + " 'vit_small_patch16_384',\n", + " 'vit_small_patch32_224',\n", + " 'vit_small_patch32_384',\n", + " 'vit_tiny_patch16_224',\n", + " 'vit_tiny_patch16_384']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# There are a few ways we can interface with PyTorchSmoothedViT. \n", "# The most direct way to get setup is by specifying the name of a supported transformer.\n", - "# Behind the scenes we are using the timm library (link: ) so any ViT supported by that libary will work.\n", + "# Behind the scenes we are using the timm library (link: https://github.com/huggingface/pytorch-image-models).\n", + "\n", + "\n", + "# We currently support ViTs generated via: \n", + "# https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py\n", + "# Support for other architectures can be added in. Consider raising a feature or pull request to have \n", + "# additional models supported.\n", + "\n", + "# We can see all the models supported by using the .get_models() method:\n", + "PyTorchSmoothedViT.get_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e8bac618", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ViT expects input shape of (3, 224, 224), but (3, 32, 32) specified as the input shape. 
The input will be rescaled to (3, 224, 224)\n", + "ArtViT(\n", + " (patch_embed): PatchEmbed(\n", + " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", + " (norm): Identity()\n", + " )\n", + " (pos_drop): Dropout(p=0.0, inplace=False)\n", + " (patch_drop): Identity()\n", + " (norm_pre): Identity()\n", + " (blocks): Sequential(\n", + " (0): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (1): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (2): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (3): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, 
out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (4): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (5): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (6): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (7): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, 
elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (8): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (9): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (10): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): 
Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (11): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " )\n", + " (norm): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (fc_norm): Identity()\n", + " (head_drop): Dropout(p=0.0, inplace=False)\n", + " (head): Linear(in_features=384, out_features=10, bias=True)\n", + ")\n" + ] + } + ], + "source": [ + "import timm\n", + "\n", + "# We can setup the PyTorchSmoothedViT if we start with a ViT model directly.\n", + "\n", + "vit_model = timm.create_model('vit_small_patch16_224')\n", + "optimizer = torch.optim.Adam(vit_model.parameters(), lr=1e-4)\n", + "\n", + "art_model = PyTorchSmoothedViT(model=vit_model, # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=optimizer, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " ablation_size=4,\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "353ef5a6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ViT expects input shape of (3, 224, 224), but (3, 32, 32) specified as the input shape. 
The input will be rescaled to (3, 224, 224)\n", + "ArtViT(\n", + " (patch_embed): PatchEmbed(\n", + " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", + " (norm): Identity()\n", + " )\n", + " (pos_drop): Dropout(p=0.0, inplace=False)\n", + " (patch_drop): Identity()\n", + " (norm_pre): Identity()\n", + " (blocks): Sequential(\n", + " (0): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (1): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (2): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (3): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, 
out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (4): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (5): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (6): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (7): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, 
elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (8): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (9): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (10): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): 
Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (11): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " )\n", + " (norm): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (fc_norm): Identity()\n", + " (head_drop): Dropout(p=0.0, inplace=False)\n", + " (head): Linear(in_features=384, out_features=10, bias=True)\n", + ")\n" + ] + } + ], + "source": [ + "# Or we can just feed in the model name and ART will internally create the ViT.\n", "\n", "art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', # Name of the model acitecture to load\n", " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", " optimizer=torch.optim.SGD, # the optimizer to use: note! this is not initialised here we just supply the class!\n", " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", - " input_shape=(3, 32, 32), # the input shape of the data: Note! ...\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! 
that if this is a different shape to what the ViT expects it will be re-scaled\n", " nb_classes=10,\n", " ablation_size=4,\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", " load_pretrained=True) # if to load pre-trained weights for the ViT" ] }, @@ -121,15 +869,60 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "44975815", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAESCAYAAABdK7eSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABCAklEQVR4nO3de3QV9bk38O/Mvue2cyM3AsgdVECLGqPW10oOIXZ5tPCHeFg9qCxZ5QTWUmxtc5bFS9uVql2t1UNxnXMs4FpSWs8SfbUtFlHCsQaqqbxeUCqUCkgSLiG3nX2d+b1/xGwJmWcgkOwb389ao2Q/e/b+zczekycz88yjKaUUiIiIiBJET/YAiIiI6OLC5IOIiIgSiskHERERJRSTDyIiIkooJh9ERESUUEw+iIiIKKGYfBAREVFCMfkgIiKihGLyQURERAnF5IOIiIgSiskHEaWMtWvX4pJLLoHX60VVVRX+8pe/JHtIRDQKmHwQUUr47W9/i9WrV+Phhx/GX//6V8yZMwe1tbU4duxYsodGRCNMY2M5IkoFVVVVuPrqq/Ef//EfAADTNDFu3DisWrUKP/jBD5I8OiIaSc7ReuG1a9fiySefRFtbG+bMmYNnnnkG11xzzVnnM00TR48eRW5uLjRNG63hEZENpRR6enpQUVEBXR/9A6SRSAQtLS1oaGiIP6brOmpqatDc3Gw5TzgcRjgcjv9smiY6OjpQVFTEfQdREgxrv6FGwebNm5Xb7Va//vWv1ccff6zuvfdelZ+fr9rb28867+HDhxUATpw4pcB0+PDh0dhFDPHFF18oAOqdd94Z9Pj3vvc9dc0111jO8/DDDyd9/XDixGnodC77jVE58vHzn/8c9957L+6++24AwLPPPovf//73+PWvf33Ww6e5ubkAgLnXVMHpHDq8rq5T4rwe3RRjBW4lxioLssRYcaEcK/JnizG37hJjDo9PjPU/wSGGTnV2ibFoTF7GfL9fjOlGVIyFI2ExFgrJMa/PI8YMGGIsGAyIsTx/rhiDkl8zEpGXz2Fz8M9hsx1ysnPksQDIzpI/N06XV4yFwhExpjSbvyR0eTkiEfk1Y8r6CEEoHMEPn34h/n1MRQ0NDVi9enX8566uLowfPz6JIzp3//JPV2LmJWP6d9VnbAKH5oBD2NaawwndKexbNA3Q9P7XG9gVfPnvD/cfwcbX/jxCoyeydy77jRFPPoZ7+PTMQ6c9PT39A3M6LZMPu18IDl0+1Op0yL+Y3S75NT0ueRV53XKC4XbIMadHjgEAHPJ7Bm3eU9flZfTavKcu/96GBjmhgynPaLduDJvrnE3DZn3brTclv6YOeb04YJMk2nzWfGfZhj6vW4y5XHLM7mzB+SYfDpvXlJKPr8aTmNMXxcXFcDgcaG9vH/R4e3s7ysrKLOfxeDzweOQkN5W5XQ7xO+LQHHAIh6z1syYfX35mBxIQbeD9Ru0MO9EQ57LfGPGTuSdOnIBhGCgtLR30eGlpKdra2oY8v7GxEX6/Pz6NGzdupIdERCnO7XZj7ty52L59e/wx0zSxfft2VFdXJ3FkaUbDV0dSeNkLpbCkl9o2NDSgq6srPh0+fDjZQyKiJFi9ejX+67/+Cxs3bsQnn3yCFStWIBAIxE/fkgX5oB5RShvxY3HDPXyazodOiWjk3HHHHTh+/DjWrFmDtrY2XHHFFdi6deuQo6h0Gh7doDQ14snH6YdPb7/9dgBfHT5duXLlOb/Op59+As3ivGfniRPiPIXydXzQiuRgsSFfHKP5SsRYwOwQY72G/CeJ0uRz/gDQF5IvEOwLyhd5Rg35+owTNif+vU55rLGY/JoOm+sM7BLKvpB8UWnMlJddCxWJMV2+PAPRsLzOfE75c9Frc/FnhxGT3xBAVpZ8MbJmczGyZnOtEGxK1/pC8kW1sajNBbdO6+0Ujtov32hZuXLlsPYT6SrcexyBDuvPpTKVeERDwfHVdR1niBkGIlHr67COHe0+r3ESjZZRuQpp9erVWLp0Ka666ipcc801eOqpp3j4lIhoFNndLlLx/AylmFFJPnj4lIiIiCSjVn91sRw+JSIiouFh8TcRUYqzOmkycBsPnlChdMTkg4goxQ1cLq4sHmPBC6WjpN/ng4iIRg6PhFA6SNkjH16nBt3qduk2twSZYFNOe0mp3NukZEyhGPPZlUza3EI2GA6JsVBULv0EAGXzum6fTV8Ym94uypTf02/TvyYWtbstvTwWw+aW7Q63vBHDEXm9RWPyesmyeU1ntjxOr818MU0uCdaVzW3nAcRs/h61u915Tra8LXoDfWIsGpPLaW26DqCn27pXkFSySSND0z3QHNafy5gRhRGzXv8xw0TMtC6DVoB4f377TytR4qVs8kFElMnkP15Ov0f6YIppBGUInnYhIkoqniihiw+TDyKipOIlo3TxYfJBRERECcXkg4goqXjahS4+TD6IiJKKtwqji0/KVrt4NQO6NvTK7txcecjTxhaIsSKf3PbUZcrlnb0dcmdTw5Rzt2Cf3BVUt29qi7z8HDHmtCkN7ezqkeez2dKFuXJ5Z0+3XG4aselOG7TpsqpsznHnZMulzdFIUIzphryALpsOu4Yhj9NpUxMbDsvzAYDbJW9kXSiVBIBw7yn5RW06JXtsuvrGTLlCoitgXYIdselmTBeur6cXXUKD7kjEgCGVzTsATbeOudwueH3WnzuXXX03URKkbPJBRJSplFJQQhtaZcoxzfYAiRpSvjtwC3abWwcRJQVPuxARZSjmHJSqmHwQESXBaFzlwStHKF0w+SAi
SoLROCrBIx2ULph8EBGlmvM8hKHOf1aihGLyQUSURJbJwnkewuCRD0oXKVvtku9xwKEPzY18NmWTfpvupWPyXGLMMOUOnna9PR1Om/pGi7EPCJv2ZZpOm7pYp003VSMsl6IqhzyeY8c65de06W7a0yd3We0z5BLlHF+eGENYfj+HTVMt3aYMwOGRux0HA3KZdZZLHqdTqEYYEArJyx+MyqW2ps3frZ298lg7++TPVK9N2Xcoav25iBkstR1NTocOl8t63Wu6Bmn1G8qAKXwPdKcOh8N6n6Tb7I+IkiFlkw8iooylyV1tNU3+28U0NSho8duSDX4FzfI11cAbEqUQpsNEREk2nOs0tDP+f67PJ0olTD6IiJKMCQJdbJh8EBFlKFa+UKpi8kFElMGYgFAq4gWnREQZiqdzKFWNePLxyCOP4NFHHx302PTp0/Hpp58O63WK/V44LcpDc11yeavXK8d0h5z/+3xyiW40Jpd+mjZfbaXkUsuI1LHyS0ZELps0lU23WJvyVuWUu6z2ROTutIYhr9M+m3JMu1LNnoC8DF90yGNx6fJr5vXK2yLaJrQPBRDsksuFxxdPEWMlJZViDAC03C4xFj51Uoz19srL39Ujl9qe6JLLrP9xWB6L4bDeBZhnKSWmC+Ny6PA4hVJbGGInYlMpGEJ3Y2WYMIQybmXY3TSAKPFG5cjHZZddhjfeeOOrN7Hr505EdJHpL6e1KbUVcun+QluBUv2TZYjJJKWWUckKnE4nysrKRuOliYiIKM2NygWnn332GSoqKjBp0iQsWbIEhw4dGo23ISIiojQ04kc+qqqqsGHDBkyfPh2tra149NFH8fWvfx0fffQRcnNzhzw/HA4jHA7Hf+7u7h7pIRERZZyhdzglSh8jnnzU1dXF/z179mxUVVVhwoQJ+N3vfodly5YNeX5jY+OQC1SJiC52Z0sumHhQOhv1+3zk5+dj2rRp2L9/v2W8oaEBXV1d8enw4cOjPSQiIiJKolEvQ+nt7cWBAwfw7W9/2zLu8XjgsehUW1acBbdF19g8t9yhMydLLifVbEpU7W7Do9l0kQ0H5TJN3ebvkqJcv81YgOxsuQtrd5dcNurPk7uw9oTk5f/8C/k1e8Nyqa3bpvHp2Cybzrwum7LQk51iLKzksbhsutr684ae7htw3aVXibHuVrk8UfXZVw/4i+UuyuE+ed309sp/D3hc8muOK5OXsaSkVIy1d1uX78YME4c+OiLORxfGiEYQDVvvP5QJaMq6qsXjdMHrtv4cuJwOeDzWMWkeomQZ8eTju9/9Lm699VZMmDABR48excMPPwyHw4E777xzpN+KiCgtKdOAaXPvDe3L1OPMUy8OTYPudFomJk6nAy7htgYOqU0uUZKMePJx5MgR3HnnnTh58iTGjBmDG264Abt27cKYMWNG+q2IiDJUf9phlWTwWg/KBCOefGzevHmkX5KI6CLzVYrRn4awtoUyC4/FERGlMO20/xJlCiYfRDTqHnnkEWiaNmiaMWNGPB4KhVBfX4+ioiLk5ORg0aJFaG9vT+KIiWg0MfkgooS47LLL0NraGp/efvvteOz+++/Hq6++ihdffBFNTU04evQoFi5cmMTREtFoStmObwU5PngsOtg6I53iPB6XvDhZniwxFg7KZahRUy7tzc8vEGN2jZwihn3OF43K3UuzcnLE2NHjYTF24HO5s+nxHnkZ++QQJvjk0tfbv36FGKssl5fhf1r+Lsaa97eJsZgpd/R16vK26Ok8Lsb6euX1mZt7ltJFQz5M7vXK87ptOjNnafJ8MUPeUOPHVYix3I4ey8cjUQM7R7jUVur51NXVheeeew6bNm3CzTffDABYv349Zs6ciV27duHaa68d0XGkArfHC59P3ifJn1gHlCZ9thRiMet9mWmyqy2lFh75IKKEkHo+tbS0IBqNoqamJv7cGTNmYPz48Whubk7WcEeVy+mM3+PozMntdsMjTA6nE7quWU6AglKGMNnclIcoCVL2yAcRZQ67nk9tbW1wu93Iz88fNE9paSna2uSjXewLRZS+mHwQ0aiz6/nk8/nO6zXZF4ooffG0CxEl3Ok9n8rKyhCJRNDZ2TnoOe3t7ZbXiAxgXyii9MXkg4gSbqDnU3l5OebOnQuXy4Xt27fH4/v27cOhQ4dQXV0tvobH40FeXt6giYjSA0+7ENGos+v55Pf7sWzZMqxevRqFhYXIy8vDqlWrUF1dnZGVLkSUwsnHmIJCeN1DhxfskMtQdc2mW2ifXE4bjMhlik5NLn3si8rla3aHlIJRuSwUAPIL5L/gIoZchPf3I0fFWEe3TYdWp9wN2OGQlyTPK79midO6hBMAvB1yCevUPPkwe2uhPJb2zmNiLNwnr+/3//Y3MabH5AqBaPZZ/sr2y51kocufU79fLr/MNeVtH4rIn28VkS/EvGRMtvB6NjXW5+FsPZ9+8YtfQNd1LFq0COFwGLW1tfjVr341omNIJTFTIWJYf740ABBK9WNm/7xWnC4HnEJjOc0h78eIkiFlkw8iyhxn6/nk9Xqxdu1arF27NkEjSi5DKTGJ0CDfTD1mGuJ8OnToTofl3Dq72lKK4SeSiChjsAcMpQcmH0RERJRQTD6IiIgooZh8EBEljXwRMVEmY/JBRJQ0vEaDLk6sdiEiSjAFDUpKPDRN7Fyr6w44lRBzOmHAuvOxyV09pZiU/UTmFxXD5xn6RSrIkftA6Lrccryz+5QYiwZ65dc05HtZmJDvA6Fc8qrNyfGKMQCIQo5/8nf5vhSBcECMeb0eOWZxP5UBvmz5vhMFDvleEC3728VYLCK/X9gv3+djTIG8XjTI992IxuR7w/RFgmIs0CcfEo/E7O+Dodndy8Xmj12XLgeVLt+rwSXc3wEAYmH5vipKuG+M9DiNDAUdSriHkHLo0M4ojR3YGk6HE26H9bZWcMAUkw/5Xj5EyZCyyQcREfXjyRnKNLzmg4gorfEoFaUfJh9ERGmNx0Uo/TD5ICIiooRi8kFEREQJNezkY+fOnbj11ltRUVEBTdPw8ssvD4orpbBmzRqUl5fD5/OhpqYGn3322UiNl4go7TmcTjhdbsvJ4XRBdziFyQXd6bacoDkRi8FyMuXCPKKkGHa1SyAQwJw5c3DPPfdg4cKFQ+JPPPEEnn76aWzcuBETJ07ED3/4Q9TW1mLv3r3weu1LTAfRnYBF6azmkstp7Xi88nxZsG4rDgBOm/zMrlNk1KYM1+PzizEAONEmt6PvOyGXDE8qlNdvWK42hdemnHb65LFiTLd50ZhDXt/dNmXPTkeXGMt1y9upqGCyGJs8dbwYO3joXTH26d++EGNup1y+CgBKyeXbsZj8tdOdckmkyy2vU9Pmt4tpc02Apll/hqXHaWS4vVnwZVuXh8dgipeQOpweOF3W3/NgXwThgPXnMmpfGU6UcMNOPurq6lBXV2cZU0rhqaeewkMPPYTbbrsNAPD888+jtLQUL7/8MhYvXnxhoyUiIqK0N6J/3hw8eBBtbW2oqamJP+b3+1FVVYXm5uaRfCsiIiJKUyN6k7G2tjYAQGlp6aD
HS0tL47EzhcNhhE+7A2N3d/dIDomIiIhSTNJP7DY2NsLv98encePGJXtIRERENIpGNPkoK+vvy9HePrivR3t7ezx2poaGBnR1dcWnw4cPj+SQiIiIKMWM6GmXiRMnoqysDNu3b8cVV1wBoP80yu7du7FixQrLeTweDzweuekZEVGmMQwTkah108qYMmAo6+olNxzQHNbzKdOAUkIjTOH1iJJl2MlHb28v9u/fH//54MGD2LNnDwoLCzF+/Hjcd999+PGPf4ypU6fGS20rKipw++23D+t9QqEYYNE6WovKXUgBuZ4sEJCvJYlE5QNAMV0uX+3tk0tiu21iY8fZr3YVk+edUCyXTU6ukEsx+0LyfGOnzRFjbiWX057qiooxX36RGMNJuTvruLJyMdYZkLv2TpoxVYzlFcilxHkFM8XYqePydjjVJZcEA4DLpixYV3KyHTVtuijb/P4wbGopbRrlQimhq63wOI2M3r4wOnv6LGPBcAhRw/q7lZuXhzy/9QaNRsOAYf19VaZ9aThRog07+XjvvffwjW98I/7z6tWrAQBLly7Fhg0b8OCDDyIQCGD58uXo7OzEDTfcgK1btw7vHh9ERDQimEZSKhp28nHTTTfZ/lWkaRoee+wxPPbYYxc0MCIiIspMSa92ISKi0cOet5SKmHwQERFRQjH5ICLKQLzWg1LZiJbaEhHR2cUMExGhQskwTRiGdWmTYRiIxawrYYxYDEbUOmYachUVUTKkbPJhaAYMi86aypBLCu0uhPV5fWIsJ1cuxTx6XC7tPXjkuBhzuuSxuNuPijEACLXLrzu1RC6nnXeTXG564IsOMZY7dowYKy6yvjkcABw73i7G8vNtSk1NeRnculyGe+y43GXW6e0UY8c7W8XYF61y91mXS/5c5OfZ3zchGJS3v3LKBxw1m7pY06YMV9dsOtfadF82+OdxUpzs7IFHs/4M6Q4FaZOZykRMuN1ArC+IWI/15znQZdPWmigJeNqFiChTMbmkFMXkg4goGRKRGLDUhVIUkw8iomTQ0J+AnGsSwqMYlEFS9poPIqKMN5wjEzyKQRmERz6IiIgooXjkg4gowZRSMIVOgZoOSIV7ylQwYlJXWyWemuFBE0o1KZt8+P3Z8HndQx6POeVS295euZxMCe2rAaCrR+5Q+vkhuZy0t1cu0/R55YNKrQflDrsAUGqx3APGjp0gxvIrJooxV49NaahXLn2tnHONPFubXPrqi8nlwgbk7RQIyLHyLLkkOCLcFwEAtOwcMVaZXSHGcvPlMuOek21iDACOtZ8UY1FNXt+hiE33UV0+6Z/tkRs3RoI25cRu67EY/HU1qsKhMPqsm9rC5dTgdAqdawMB9AhJi8+hI89lvUt3WHQIJ0omnnYhIkoHw73glBeoUgpj8kFElA6Ge/BioJqGKAUx+SAiylQ820IpiskHERERJRSTDyKiTKPO+D9RimHyQUQXZOfOnbj11ltRUVEBTdPw8ssvD4orpbBmzRqUl5fD5/OhpqYGn3322aDndHR0YMmSJcjLy0N+fj6WLVtmW02W9mxOh2haf1ia5Jc8bUZ98P81m8aDRMmQsqW2vV0diIWGlgE6Iz3iPC6LLrhxcrNUOB1ysK9XLsMtyJU7t+Zny6WPwVP2pbYlFUVibOzs/yPGPjoSEWN/2y/HrisvFGOdnfJ8pZPniDEdQh0hgEhYLsPNV3LJbPcxuXzVF7FuJQ4A5YU2y2d4xJhrdoEYC9p0ygWAP//h/4qxI4fl5XcIpa/95F8gNk10EbX5G0MXWrCHhHbvVgKBAObMmYN77rkHCxcuHBJ/4okn8PTTT2Pjxo2YOHEifvjDH6K2thZ79+6F19v/PVmyZAlaW1uxbds2RKNR3H333Vi+fDk2bdp0zuNIJzleDwqEbto+txMeoWQ2GgwhFrL+TubmeFBcaF1WfkJ1ATh1XmMlGg0pm3wQUXqoq6tDXV2dZUwphaeeegoPPfQQbrvtNgDA888/j9LSUrz88stYvHgxPvnkE2zduhXvvvsurrrqKgDAM888g1tuuQU/+9nPUFEh34uFiNITT7sQ0ag5ePAg2traUFNTE3/M7/ejqqoKzc3NAIDm5mbk5+fHEw8AqKmpga7r2L17t/ja4XAY3d3dgyYiSg9MPoho1LS19d8JtrS0dNDjpaWl8VhbWxtKSkoGxZ1OJwoLC+PPsdLY2Ai/3x+fxo0bN8KjJ6LRwuSDiNJSQ0MDurq64tPhw4eTPSQiOkdMPoho1JSV9ffHaW8f3COpvb09HisrK8OxY8cGxWOxGDo6OuLPseLxeJCXlzdoIqL0wOSDiEbNxIkTUVZWhu3bt8cf6+7uxu7du1FdXQ0AqK6uRmdnJ1paWuLPefPNN2GaJqqqqhI+5kRQ6L8Y12rCWWISzaYayi5GlAzDrnbZuXMnnnzySbS0tKC1tRVbtmzB7bffHo/fdddd2Lhx46B5amtrsXXr1mG9j64BDovvi2HToVPZfMF0yKWDhiaX2p6SKzjR3S3vDFRYLlEt98slugBw9Te+IcYqp18rxl5a/2sxVmbT2dURCYqxL/5+QH7NSZeKMW/RFDGWreRy6b6OY2LMZ8qlr5GgXNp7okeO5Y+ROwEXlV0ixoK99n9l6zZhwy137tV0+TMcjcqfKU1osw4AmpJjsZhQ0mmc+92pent7sX///vjPBw8exJ49e1BYWIjx48fjvvvuw49//GNMnTo1XmpbUVER32/MnDkTCxYswL333otnn30W0WgUK1euxOLFizO20qWrT0GopkVuMAafw7rkPMfjQEGe9f6jcuJ4zLziMvSnNoM/R5H/tx/Y/fkFjJhoZA07+ThbTT8ALFiwAOvXr4//7PHI91IgovT23nvv4RunJcyrV68GACxduhQbNmzAgw8+iEAggOXLl6OzsxM33HADtm7dGr/HBwC88MILWLlyJebNmwdd17Fo0SI8/fTTCV+W9MdbmlJ6GHbyYVfTP8Dj8dieqyWizHHTTTfZnhLQNA2PPfYYHnvsMfE5hYWFGXtDscQ6s5Xt0KMgRKlgVK752LFjB0pKSjB9+nSsWLECJ0/Kd6ZkrT4R0UjShH8TpY4RTz4WLFiA559/Htu3b8fjjz+OpqYm1NXVwTCszzuzVp+I6MLwZAulmxG/vfrixYvj/541axZmz56NyZMnY8eOHZg3b96Q5zc0NMTPEQP9V8IzASEiIspco15qO2nSJBQXFw+6Gv50rNUnIrowPLlC6WbUG8sdOXIEJ0+eRHl5+bDm01T/dCZD6MIJAJou51JOmzRLBW1eU26yisIi666UAFCWJZf2fu2qafKLAph5nVxOe+qYXGrsickdeCdVVoox02Yhy0rGiLFYSF7GPptuuJGYPF80KH8kDcjlwge+OCLGPvzoPTF23bXyOIvK5O7C3T1ySTAAuOSPBoovkUutTZvPsBGxKZm1Ke3uOt4pxsI91gMNR+X3oguXk+NBgd9nGSvwmMgWSm0rxhajXOhCXT55GiZfcbVl7POQDuAP5zVWotEw7OTDrqa/sLAQjz76KBYtWoSysjIcOH
AADz74IKZMmYLa2toRHTgRUTrTNJubgl1AbOBvtoG6Fx4VoVQ07OTDrqZ/3bp1+OCDD7Bx40Z0dnaioqIC8+fPx49+9CPe64OIaKTZZBdMPCiVDTv5OFtN/+uvv35BAyIiutidU+Iw8KQznqwBUOrLh5h9UIpibxciohRzTjmDdsb/v6S+fEwx8aAUxuSDiCiDaOABD0p9TD6IiJLlzDPY53u3MIv5mIBQKhv1UtvzZcYMmI6huVEwLJeFum06tzqdLjHm0OUyxSllcidVr0/O3S6ZIN8obc4NctdaACifPluM7WleL8bGj5PHWnbZLDHmHjNZjDmz/GKsLySX/Qa75c617UcPi7FT7XLJrBGVu9P6cr1irLhY3vaHj74vxkrLx4qxWJ+87ACggmExpgVOiTFDyR2GlVXt+Zd8HnkZ3WVyrNtj/SsqFOGvrtFU4NNQmmu9jkuKCpGXY12GO2n6NFwyzbpjdH7FeJRMse40XfBp+/kNlGiUpGzyQUSUyUar1FYInPO4iBKBp12IiDLFmQ1tiVIUkw8iokyhfZVzsNqFUhmTDyKiDCJU4BKlFCYfRERElFBMPoiIiCihUrbaxeVwwuUYOrxTPXK5pRGSDzT6sqxL1wDAoctXZpXYdK493NopxiZ/bYEYq5wlx/rJJbPRnoAY8+fKZbFjpl0hxgJO6y6ZAPDx+++KsXBQHkt3d6cYO/HFITHmMOSyZ69X/riOnSiXxc4WShMBIOaQO8y6HPlyzC13QgYAZygkxvo+/0KMmTGbzrU2fyr0OhxiLKtIXsbSCuvOvcEQu9qOpr6+MLq6rL8/Uy6di4lTZ1rGxk+ZioopUy1jLq8XQP+2HnrndfkzQJQMKZt8EBFlKvtCFA26bp1MarpDjmk6BlINXvdBqY6nXYiIMhlLbikFMfkgIspkPPxBKYjJBxFRJuGRDkoDTD6IiDIJj3RQGmDyQURERAmVstUukVAYujm03C/LIw9Z88rlhi49JsaUIcd8OfJr/vMd/yzGrqubJ8byikvFGAC0//0TMeawWY7Oni4xdvwf+8TY0R65rHLHyy+LsRyf3C01FJa7vpaVyiXBeblySeDBI3I33IjNeimsuESMTZs1V4zB8Iihjk65+y4A9NmUfZ8KymPVlPz5DgXljs69Sj7Wrnrlst+Z+cJ7yRXPNAK+VvU1fO3SCZaxmV+7DmMnTrOM+XIL4MuxLsUP9XSh48hBy1jvyWPnN1CiUZKyyQcRUaZyuZ3weD04/U4c8ZjHDZfHbTmfw+mErssHrJVpnaBKjxMlC0+7EBElDS/QoIsTkw8ioqRRGInyFBa4ULrhaRcioqQZmSMfPH5C6YZHPoiIMhSPiFCqYvJBRJSBTm8uR5RqhnXapbGxES+99BI+/fRT+Hw+XHfddXj88ccxffr0+HNCoRAeeOABbN68GeFwGLW1tfjVr36F0lL78tIzmSoCU1lcoW1RfjtAi8lXdMeU3IVU0+Svp9eTJ8aumCuXaXpcchnq3j3vizEAOHX0gBgLh+WyyZ5THWLs8P69YqxXyR1/XYb8fjlOuQw5zyuXzI4pkEttW9vbxFgsKm/Dvh65tPfwQbmLLvCxGOnt7RFjXqf9Lj3mKRFjJ2PyZ8rn84qxrFx5O/mccllwT1+3GIuZ1mW/MZvvGV24yvETMOOyyyxjRcXFcDmtd81dJ0/iiNAVOdTbhb7O45ax4+2t5zdQolEyrCMfTU1NqK+vx65du7Bt2zZEo1HMnz8fgcBXraHvv/9+vPrqq3jxxRfR1NSEo0ePYuHChSM+cCKidOVyueD1ei0np8MBXdMsJ8OIIRIOW0+RMKLRiOVkxOR7yxAlw7COfGzdunXQzxs2bEBJSQlaWlpw4403oqurC8899xw2bdqEm2++GQCwfv16zJw5E7t27cK11147ciMnIiKitHRB13x0dfXfUbOwsBAA0NLSgmg0ipqamvhzZsyYgfHjx6O5udnyNcLhMLq7uwdNRERElLnOO/kwTRP33Xcfrr/+elx++eUAgLa2NrjdbuTn5w96bmlpKdrarM/lNzY2wu/3x6dx48ad75CIiIgoDZx38lFfX4+PPvoImzdvvqABNDQ0oKurKz4dPiz37yAiIqL0d143GVu5ciVee+017Ny5E5WVlfHHy8rKEIlE0NnZOejoR3t7O8rKyixfy+PxwOORr9QnIroYDe36QpQ5hpV8KKWwatUqbNmyBTt27MDEiRMHxefOnQuXy4Xt27dj0aJFAIB9+/bh0KFDqK6uHubQzC+nMx6Nye02na4sMWbE5NLBCOQrwUv91h0kAeD1//uaGCsslUs4S8rtTy1F+uTutC6XnKjlZMslnE5dLovNtikLLispEmPBnlNizOeQx3ny+AkxFo3I2ynXK5eaRnrlUtvP3n9PjLV++jcxFo4FxRhc8voEAMNufVfKZcjIlj/fukcue/YKJbMAUAB5vc28bKLl433BKID/J853up07d+LJJ59ES0sLWltbsWXLFtx+++3x+F133YWNGzcOmqe2tnbQBewdHR1YtWoVXn31Vei6jkWLFuGXv/wlcnJyzmkM6Wbf3r3INgOWsZz8EniyrL/LLo8PLrf19jQiIURD1q8ZCfad30CJRsmwko/6+nps2rQJr7zyCnJzc+PXcfj9fvh8Pvj9fixbtgyrV69GYWEh8vLysGrVKlRXV7PShShDBQIBzJkzB/fcc49YVr9gwQKsX78+/vOZRzuXLFmC1tbWeAn/3XffjeXLl2PTpk2jOvZk6esNoKuz0zIWNRzwBq3vaZOdk4fsXOvjIUY0DNOwTkJN3reFUsywko9169YBAG666aZBj69fvx533XUXAOAXv/hF/C+X028yRkSZqa6uDnV1dbbP8Xg84qnXTz75BFu3bsW7776Lq666CgDwzDPP4JZbbsHPfvYzVFRUjPiYiSi5hn3a5Wy8Xi/Wrl2LtWvXnvegiCiz7NixAyUlJSgoKMDNN9+MH//4xygq6j+l19zcjPz8/HjiAQA1NTXQdR27d+/Gt771LcvXDIfDCIfD8Z9Zpk+UPtjbhYhG1YIFC/D8889j+/btePzxx9HU1IS6ujoYRv+pgLa2NpSUDL4dvdPpRGFhoViiD7BMnyidnVe1CxHRuVq8eHH837NmzcLs2bMxefJk7NixA/PmzTvv121oaMDq1avjP3d3dzMBIUoTPPJBRAk1adIkFBcXY//+/QD6S/SPHTs26DmxWAwdHR3idSJA/3UkeXl5g6bMo4R/E6W3lD3yYZoaTHPoVd1um06qXqfc1Ra6XDGvHHLpoxmRO6meOCEfEu49Lsd8Uftz0ybkZSwskEtf8yvGiLGYERZjXxyVx6psdni6Ln98IjaNrByaXNqb7ZXLpW2aFsNhF7TpWmxE5LJm3eLzN6C7Ty4zBoCIRy7Tza2Qt0XA1ynGeky5DDcUkP+OKMqbJMaKhVLqQEB+rwt15MgRnDx5EuXl5QCA6upqdHZ2oqWlBXO/7BT95ptvwjRNVFVVjdo4kqmvpxPdJ623WainWyypjxUUQSsstowpU1l3A
gcQ65NL0YmSIWWTDyJKD729vfGjGABw8OBB7NmzB4WFhSgsLMSjjz6KRYsWoaysDAcOHMCDDz6IKVOmoLa2FgAwc+ZMLFiwAPfeey+effZZRKNRrFy5EosXL87YSpdYJIxIyDpBNaMxxBxOWN1mLMvthOGzTkwUNCjhYLYZk/+IIkoGnnYhogvy3nvv4corr8SVV14JAFi9ejWuvPJKrFmzBg6HAx988AH++Z//GdOmTcOyZcswd+5c/O///u+ge3288MILmDFjBubNm4dbbrkFN9xwA/7zP/8zWYuUAs5MPHjKhTILj3wQ0QW56aabbMvwX3/99bO+RmFhYcbeUOz8nHnKjzdap8zCIx9ERGmFR0Eo/TH5ICJKKzwKQumPyQcRERElVMpe86FrHuja0OF5PXKHTmXTnTbbJ5dwZudal64BQF9U7iRalOsWY06bsUS62sUYAJi6/Lp9LrmktLTUukMpAJgRuXRy+uxKMfbOW9vFWETJnTJdmvzXWbBXni8vV75Xg9spf1wdmrxeekPyNjzYKpfMdnbK2zCsWXcPHTBmmpzXj8236c6r5G1/6oS83twhm/LlsTadifusG44Fg2xENpq8Lh3ZHuuSeocOODTr9e8wQ4j1WZfqK02H0oRqF5v9GFEypGzyQUSUqVxOHV63dfKhQ0EXruvQzSjM6GkluqcVxSjoUMK9d1hqS6mGp12IiNIVL/+gNMXkg4goE7EohlIYkw8ioqQZxQxh4KgIkxBKQUw+iIiSJgHnTXhqhlIQkw8ioqTioQm6+KRstYvLqcHtHJob9YXljqAOr013Wod1MyYA6IvKHUgdLnnH4HHLJZMulzwWd5ZfjAGAP0+et+24XKbbN1YumS0ZN0WMfXHshBi77OrrxVjv8aNi7O9/+1iMBXo7xZjTIW8Lv18uw9Ugl9q2fiGP89DnNl1tPfJ2yCuVS7cBYEyhzVhtSn+1Dvk9C07JX9exJYVirDJf/lzs32vd0TgYYnXEaHI4HHAJpeO65oAulMw63T44hI63usMJp8u65Nrrlfd/RMmQsskHEVGmcjoccDmtEwWHww1dd1k1tf0y+fBazud2OeETOt56PfL9Y4iSgaddiIhS0Yhdq8HTOpR6mHwQEWU0XnFKqYfJBxERESUUkw8iIiJKKCYfREQZhdd4UOobVvLR2NiIq6++Grm5uSgpKcHtt9+Offv2DXrOTTfdBE3TBk3f+c53RnTQRETpTJmAKUxK6ejfNQ+dTMNELBqxnIxYDEbMgBEzv/z/V5NpyqXoRMkwrFLbpqYm1NfX4+qrr0YsFsO///u/Y/78+di7dy+ys7+6P8G9996Lxx57LP5zVpb9PRGslBTpyPIOzY2iJ0+K8wQN+QsWsOmArnS5fbjTpo17Xp7cqtwt1NsDQDBg3RJ7gM9ls1kicuy9d94RY5Omy/cHOXLE+l4PAKDr8sVqWR55GR0291Xx+eR7WQR65ft8BINyLBaLiLEcofwQAK67cpoY8+bK9+qIOWJiDACMaJ8YCx6W7/Oh91iXUQJASVauGLty2mXyfPmlYqyl9aDl46GI/fLRhTGVBtMUvltKhwbrjremYSCqrPdXOhTcwv7KjMn7OKJkGFbysXXr1kE/b9iwASUlJWhpacGNN94YfzwrKwtlZWUjM0IiIiLKKBd0zUdXV//dIQsLB99d8YUXXkBxcTEuv/xyNDQ0oK9P/iswHA6ju7t70EREROeIl3hQGjrvO5yapon77rsP119/PS6//PL44//yL/+CCRMmoKKiAh988AG+//3vY9++fXjppZcsX6exsRGPPvro+Q6DiOjixtt4UBo67+Sjvr4eH330Ed5+++1Bjy9fvjz+71mzZqG8vBzz5s3DgQMHMHny5CGv09DQgNWrV8d/7u7uxrhx4853WERERJTiziv5WLlyJV577TXs3LkTlZVy0yoAqKqqAgDs37/fMvnweDzweNj0iIgozqKvC1EmGVbyoZTCqlWrsGXLFuzYsQMTJ0486zx79uwBAJSXl5/XAImIMo1pGojFrDsH67oOBaGiRQM0KSlRJpRpfQFIKCR3AydKhmElH/X19di0aRNeeeUV5Obmoq2tv0TT7/fD5/PhwIED2LRpE2655RYUFRXhgw8+wP33348bb7wRs2fPHtbAKivdyPENLeX0a3Ip4v7D8oWt7cflq7IihnzkJSdHXkWBPrkdu2H2ijHHWa7z7TgulxP39MolkKGoPB6HkmO5OQVirL2tQ4wdCcglo6aS/2wrHSOXKGum3Mr9VOcpMebJlrdhvl8uUXU75G0RjtiUJwodSQcEwvLrRnrlebNNeb4p4+QKsooyeZ0ePiKXWZ88bv2dCUdZmjmaItEQwhHrdW+YQTid1p8DDdqXe4+h36+AocOMfVmie8aRk1On5O8xUTIMK/lYt24dgP4biZ1u/fr1uOuuu+B2u/HGG2/gqaeeQiAQwLhx47Bo0SI89NBDIzZgIqKLm1Vir87+FKIUMuzTLnbGjRuHpqamCxoQERENF7MNSi/s7UJEREQJxeSDiIiIEorJBxERESXUed9kjIiIzpOSr6FTSoMplMxqkOczDUDqrWkqdrWl1JKyyUdevgs5WUNLEoNCaSAAFJRYd4IEAGTLnXVPtMs18KGI3C3V6Za7ntrMBvMsZYxRQx5PV1AuN8226d4a6pPLYoOhE2IsYjNWwyamlLwtervlbZiX57OJ+cVYMCi/5omT8jrLyZE77Gq6fGBQi9lffO12ysvhkavF4XbL6+2SKZeIsWCfPJ6dO/eKsQ/+dszy8ZhNh2i6cKZpwjCk74+CUtafPdMAIGwaw4zBFBKTqE3XZ6Jk4GkXIiIiSigmH0RERJRQTD6IiIgooZh8EBERUUIx+SAiIqKEStlqFyKijGVzN3RN06Fp1lVPugZIPRs13QSkChqxFS5RcqRs8uHwOuH0Dh2eN88tzlOYIx/IcQbl8lWXTy4r7D5ls4oM+f183hJ5Npd9GaMR7hRj7ix5PC6nvG4cDrnUOGxzD4BIVC7RUzadazWbSlQVkct+DTkEl10nWbdcZtx5Si61DUbkLrr+fLmU2mlThgsAus226IPcmbj9RI8YO2XT0bgnIHctfmPHp/L7CRXK0n0mrDQ2NuKll17Cp59+Cp/Ph+uuuw6PP/44pk+fHn9OKBTCAw88gM2bNyMcDqO2tha/+tWvUFpaGn/OoUOHsGLFCrz11lvIycnB0qVL0djYCKczZXdT5013aHC6rT9DbmcOXE7r76tpmpBabEWNMFQsaBnTHDa3ISBKAp52IaIL0tTUhPr6euzatQvbtm1DNBrF/PnzEQgE4s+5//778eqrr+LFF19EU1MTjh49ioULF8bjhmHgm9/8JiKRCN555x1s3LgRGzZswJo1a5KxSBnk3JNIokTKvD8piCihtm7dOujnDRs2oKSkBC0tLbjxxhvR1dWF5557Dps2bcLNN98MAFi/fj1mzpyJXbt24dprr8Wf/vQn7N27F2+88QZKS0txxRVX4Ec/+hG+//3v45FHHoHbLR9JIjs83UKpiUc+
iGhEdXX1nwIqLCwEALS0tCAajaKmpib+nBkzZmD8+PFobm4GADQ3N2PWrFmDTsPU1taiu7sbH3/8cQJHT0SJwCMfRDRiTNPEfffdh+uvvx6XX345AKCtrQ1utxv5+fmDnltaWoq2trb4c05PPAbiAzEr4XAY4fBX13J1d3eP1GKkNAUez6D0xyMfRDRi6uvr8dFHH2Hz5s2j/l6NjY3w+/3xady4caP+nqmAiQdlAiYfRDQiVq5ciddeew1vvfUWKisr44+XlZUhEomgs7Nz0PPb29tRVlYWf057e/uQ+EDMSkNDA7q6uuLT4cOHR3BpRpeuK2ia9QTNhFKGMJkwlWE5KWVCmcLE604pxaTsaZdArxOaaVFa6cgR58nJlus0XT7525dt02bU75fLUHu7rcva+mPtcqzvLF1tQ3I8110kxrwuuRQ1FpZLjZ1OOQcVqgEBAC6PXL6nafKMWTnyx063+UTGDLnU1O2TZ8zLl8uMOzrk0tYemxLkvEJ5OwBAn00X0c/+cVKMffqh/Au0tFAu/S2tlJcRurwcxf5cy8cN08Tnp2zqnk+jlMKqVauwZcsW7NixAxMnThwUnzt3LlwuF7Zv345FixYBAPbt24dDhw6huroaAFBdXY2f/OQnOHbsGEpK+svUt23bhry8PFx66aWW7+vxeODxyCXWqSwrN4r8Yuv1GwqaiEYDlrFw1EQ0ar0vM2ImTKHbcjQif/+JkiFlkw8iSg/19fXYtGkTXnnlFeTm5sav0fD7/fD5fPD7/Vi2bBlWr16NwsJC5OXlYdWqVaiursa1114LAJg/fz4uvfRSfPvb38YTTzyBtrY2PPTQQ6ivr0/bBGPk8fAFZQ4mH0R0QdatWwcAuOmmmwY9vn79etx1110AgF/84hfQdR2LFi0adJOxAQ6HA6+99hpWrFiB6upqZGdnY+nSpXjssccStRhpQMPQBISXn1J6YvJBRBdEncMFBV6vF2vXrsXatWvF50yYMAF/+MMfRnJoFwEmHpSeeMEpERERJRSTDyKiZLA6gzJqL06UWoaVfKxbtw6zZ89GXl4e8vLyUF1djT/+8Y/xeCgUQn19PYqKipCTk4NFixYNKZ8jIrrYaZqCdma5rU357enP6U8shk79jWsHfsYZcaLUMqxrPiorK/HTn/4UU6dOhVIKGzduxG233Yb3338fl112Ge6//378/ve/x4svvgi/34+VK1di4cKF+POf/zzsgR09DGRZVMCGO+Wy2Nwxcimm12fTvVSu3kVhobyKegNCS1AAnZ1y7NRJ+z4Vp+RKTDhMubzVtDn3bkittgHAlGN22ammy+ebHTadSIM23YCVvAnhMuVtGOvrEGNGUN4Whk2n3M5eeb6IfbU0OmzKsP+xX97AnSetSywBIBKQ37TMb30vDACYOWGsGJOGGTVM/PUf8jqlC1M8JoJxE6xXfl9fEBGhUjvY50AoaL0PCIWAoPDxcXvl0m+iZBhW8nHrrbcO+vknP/kJ1q1bh127dqGysvKszaOIiIiIzvuaD8MwsHnzZgQCAVRXV59T8ygiIiKiYZfafvjhh6iurkYoFEJOTg62bNmCSy+9FHv27Dlr8ygrF2tzKCIioovVsI98TJ8+HXv27MHu3buxYsUKLF26FHv37j3vAVyszaGIiIguVsNOPtxuN6ZMmYK5c+eisbERc+bMwS9/+ctzah5lJZ2bQxEREdHwXfB9PkzTRDgcHtQ8asCZzaOseDyeeOnuwERERMNjf69T3gmVUsuwrvloaGhAXV0dxo8fj56eHmzatAk7duzA66+/fk7No4bDcBXBcA1tKBV1XyXOEzblzo167IQY8/rlL2b+GLm0t0CX60IL++ROop0dPjEGAJ0n5HLaYEDeZEbMpoRXyXmmGZPHGgrKnU3dbvn9HE55GXpC8vsFe206Eyu5XDBXt+7OCgCmLl9HFI3K69OTLZcuey0+m6fLd8tjnYR8MTZrTrYYmz57jhi7ZMoUMXbNtXLJ8JGjvZaPhyMx4K//EOejC1M8thLjppVYxsJhEzGhqjrY50Q4ZF0eHg5qCAasv+fHzS8AHD2foRKNimElH8eOHcO//uu/orW1FX6/H7Nnz8brr7+Of/qnfwJw9uZRRERERMNKPp577jnb+Lk0jyIioq+wLy1djNjbhYgoiZh40MWIyQcRERElFJMPIiIiSqhh3+F0tKkvm6P1hayrBYLC4wCgueTGY6YpV6boffKBT2dAfk3ocqOvQFCu6AgEbV4TQJ9dNUhIrsCwWUTY5Zm21S5heayGsmksZ9PILhiWlyEUkd9PKTnmtKk8CkXkWNhunWnyOB1KruYBgHBUfuGIVMoAwGUzn/SdAIDegFwlFLTZhmFh3QyMX9k0K0w16TTWYMhAb+DLdYyvTr0oAJGwCekrGepTCAtFfeGghlDI+nsubWei0XAu30VNpdg39siRI7zLKVGKOHz4MCorK5M9jHPy97//HZMnT072MIgueuey30i55MM0TRw9ehS5ubnQNA3d3d0YN24cDh8+zBuQnYbrRcZ1Y20460UphZ6eHlRUVEDX0+PsbGdnJwoKCnDo0CH4/f5kD+eCZMpnmMuRekZzWYaz30i50y66rltmTLz7qTWuFxnXjbVzXS/p9gt8YGfn9/szZrtnymeYy5F6RmtZznW/kR5/0hAREVHGYPJBRERECZXyyYfH48HDDz8Mj8e+l8bFhutFxnVjLdPXSyYtX6YsC5cj9aTKsqTcBadERESU2VL+yAcRERFlFiYfRERElFBMPoiIiCihmHwQERFRQqV08rF27Vpccskl8Hq9qKqqwl/+8pdkDynhdu7ciVtvvRUVFRXQNA0vv/zyoLhSCmvWrEF5eTl8Ph9qamrw2WefJWewCdTY2Iirr74aubm5KCkpwe233459+/YNek4oFEJ9fT2KioqQk5ODRYsWob29PUkjTpx169Zh9uzZ8ZsIVVdX449//GM8nqnrJd32F4888gg0TRs0zZgxIx5P1e00Evukjo4OLFmyBHl5ecjPz8eyZcvQ29ubwKXod7Zlueuuu4ZsowULFgx6Tiosy0jtDw8dOoRvfvObyMrKQklJCb73ve8hFhudvkApm3z89re/xerVq/Hwww/jr3/9K+bMmYPa2locO3Ys2UNLqEAggDlz5mDt2rWW8SeeeAJPP/00nn32WezevRvZ2dmora1FKCQ3GssETU1NqK+vx65du7Bt2zZEo1HMnz8fgUAg/pz7778fr776Kl588UU0NTXh6NGjWLhwYRJHnRiVlZX46U9/ipaWFrz33nu4+eabcdttt+Hjjz8GkJnrJV33F5dddhlaW1vj09tvvx2Ppep2Gol90pIlS/Dxxx9j27ZteO2117Bz504sX748UYsQd7ZlAYAFCxYM2ka/+c1vBsVTYVlGYn9oGAa++c1vIhKJ4J133sHGjRuxYcMGrFmzZnQGrVLUNddco+rr6+M/G4ahKioqVGNjYxJHlVwA1JYtW+I/m6apysrK1JNPPhl/rLOzU3k8HvWb3/wmCSNMnmPHjikAqqmpSSnVvx5
cLpd68cUX48/55JNPFADV3NycrGEmTUFBgfrv//7vjF0v6bi/ePjhh9WcOXMsY+mync5nn7R3714FQL377rvx5/zxj39UmqapL774ImFjP9OZy6KUUkuXLlW33XabOE+qLsv57A//8Ic/KF3XVVtbW/w569atU3l5eSocDo/4GFPyyEckEkFLSwtqamrij+m6jpqaGjQ3NydxZKnl4MGDaGtrG7Se/H4/qqqqLrr11NXVBQAoLCwEALS0tCAajQ5aNzNmzMD48eMvqnVjGAY2b96MQCCA6urqjFwv6by/+Oyzz1BRUYFJkyZhyZIlOHToEID0/fyeyz6pubkZ+fn5uOqqq+LPqampga7r2L17d8LHfDY7duxASUkJpk+fjhUrVuDkyZPxWKouy/nsD5ubmzFr1iyUlpbGn1NbW4vu7u74UdORlJLJx4kTJ2AYxqCVAAClpaVoa2tL0qhSz8C6uNjXk2mauO+++3D99dfj8ssvB9C/btxuN/Lz8wc992JZNx9++CFycnLg8Xjwne98B1u2bMGll16akeslXfcXVVVV2LBhA7Zu3Yp169bh4MGD+PrXv46enp603U7nsk9qa2tDSUnJoLjT6URhYWHKLduCBQvw/PPPY/v27Xj88cfR1NSEuro6GIYBIDWX5Xz3h21tbZbbbSA20lKuqy3RcNXX1+Ojjz4adL78Yjd9+nTs2bMHXV1d+J//+R8sXboUTU1NyR4Wnaauri7+79mzZ6OqqgoTJkzA7373O/h8viSOjAYsXrw4/u9Zs2Zh9uzZmDx5Mnbs2IF58+YlcWSydNkfpuSRj+LiYjgcjiFX4ra3t6OsrCxJo0o9A+viYl5PK1euxGuvvYa33noLlZWV8cfLysoQiUTQ2dk56PkXy7pxu92YMmUK5s6di8bGRsyZMwe//OUvM3K9ZMr+Ij8/H9OmTcP+/fvTdjudyz6prKxsyIXAsVgMHR0dKb1sADBp0iQUFxdj//79AFJvWS5kf1hWVma53QZiIy0lkw+32425c+di+/bt8cdM08T27dtRXV2dxJGllokTJ6KsrGzQeuru7sbu3bszfj0ppbBy5Ups2bIFb775JiZOnDgoPnfuXLhcrkHrZt++fTh06FDGrxsrpmkiHA5n5HrJlP1Fb28vDhw4gPLy8rTdTueyT6qurkZnZydaWlriz3nzzTdhmiaqqqoSPubhOHLkCE6ePIny8nIAqbMsI7E/rK6uxocffjgomdq2bRvy8vJw6aWXjsqgU9LmzZuVx+NRGzZsUHv37lXLly9X+fn5g67EvRj09PSo999/X73//vsKgPr5z3+u3n//ffX5558rpZT66U9/qvLz89Urr7yiPvjgA3XbbbepiRMnqmAwmOSRj64VK1Yov9+vduzYoVpbW+NTX19f/Dnf+c531Pjx49Wbb76p3nvvPVVdXa2qq6uTOOrE+MEPfqCamprUwYMH1QcffKB+8IMfKE3T1J/+9CelVGaul3TcXzzwwANqx44d6uDBg+rPf/6zqqmpUcXFxerYsWNKqdTdTiOxT1qwYIG68sor1e7du9Xbb7+tpk6dqu68886UWpaenh713e9+VzU3N6uDBw+qN954Q33ta19TU6dOVaFQKKWWZST2h7FYTF1++eVq/vz5as+ePWrr1q1qzJgxqqGhYVTGnLLJh1JKPfPMM2r8+PHK7Xara665Ru3atSvZQ0q4t956SwEYMi1dulQp1V/a9sMf/lCVlpYqj8ej5s2bp/bt25fcQSeA1ToBoNavXx9/TjAYVP/2b/+mCgoKVFZWlvrWt76lWltbkzfoBLnnnnvUhAkTlNvtVmPGjFHz5s2LJx5KZe56Sbf9xR133KHKy8uV2+1WY8eOVXfccYfav39/PJ6q22kk9kknT55Ud955p8rJyVF5eXnq7rvvVj09PSm1LH19fWr+/PlqzJgxyuVyqQkTJqh77713SEKbCssyUvvDf/zjH6qurk75fD5VXFysHnjgARWNRkdlzNqXAyciIiJKiJS85oOIiIgyF5MPIiIiSigmH0RERJRQTD6IiIgooZh8EBERUUIx+SAiIqKEYvJBRERECcXkg4iIiBKKyQcRERElFJMPIiIiSigmH0RERJRQTD6IiIgoof4/N1Cc/V7M0REAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# We can see behind the scenes how PyTorchSmoothedViT processes input by passing in the first few CIFAR\n", "# images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", - "ablated = art_model.ablator.forward(x_train[0:10], column_pos=6)\n", - "ablated = ablated.cpu().detach().numpy()\n" + "original_image = np.moveaxis(x_train, [1], [3])\n", + "\n", + "ablated = art_model.ablator.forward(torch.from_numpy(x_train[0:10]).to(device), column_pos=6)\n", + "ablated = ablated.cpu().detach().numpy()\n", + "\n", + "# Note the shape:\n", + "# - The ablator adds an extra channel to signify the ablated regions of the input.\n", + "# - The input is reshaped to be 224 x 224 to match the image shape that the ViT is expecting\n", + "print(f\"The shape of the ablated image is {ablated.shape}\")\n", + "\n", + "ablated_image = ablated[:, 0:3, :, :]\n", + "\n", + "# shift the axis to disply\n", + "ablated_image = np.moveaxis(ablated_image, [1], [3])\n", + "\n", + "# plot the figure: Note the axis scale!\n", + "f, axarr = plt.subplots(1,2)\n", + "axarr[0].imshow(original_image[0])\n", + "axarr[1].imshow(ablated_image[0])" ] }, { @@ -139,10 +932,15 @@ "metadata": {}, "outputs": [], "source": [ - "# We can now train the model \n", + "# We can now train the model\n", + "from torchvision import transforms\n", "\n", "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1)\n", - "art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True, scheduler=scheduler)\n", + "art_model.fit(x_train, y_train, \n", + " nb_epochs=30, \n", + " update_batchnorm=True, \n", + " scheduler=scheduler,\n", + " transform=transforms.Compose([transforms.RandomHorizontalFlip()]))\n", "torch.save(art_model.model.state_dict(), 'trained.pt')\n", "\n" ] @@ -151,29 +949,23 @@ "cell_type": "code", "execution_count": null, "id": "046b8168", - "metadata": {}, - "outputs": [], + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Normal Acc 0.89 Cert Acc 0.68: 49%|██████████████████████████████████████████████████████████████████████▏ | 38/78 [00:34<00:34, 1.15it/s]" + ] + } + ], "source": [ "# Perform certification\n", "art_model.model.load_state_dict(torch.load('trained.pt'))\n", - "art_model.eval_and_certify(x_test, y_test)" - ] - }, - { - "cell_type": "markdown", - "id": "128ce03a", - "metadata": {}, - "source": [ - "We can also setup the PyTorchSmoothedViT if we start with a ViT model directly.\n" + "acc, cert_acc = art_model.eval_and_certify(x_test, y_test)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f41e078", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -192,7 +984,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 7a83d4e16c..42897414e1 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -21,6 +21,8 @@ from art.utils import load_dataset from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT +from art.estimators.certification.smoothed_vision_transformers.pytorch import ArtViT + from tests.utils import ARTTestException 
@@ -63,8 +65,9 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ Check that the ablation is being performed correctly """ - from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator import torch + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator try: cifar_data = fix_get_cifar10_data[0] cifar_labels = fix_get_cifar10_data[1] @@ -75,7 +78,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): original_shape=(3, 32, 32), output_shape=(3, 224, 224)) - cifar_data = torch.from_numpy(cifar_data) + cifar_data = torch.from_numpy(cifar_data).to(device) # check that the ablation functioned when in the middle of the image ablated = col_ablator.forward(cifar_data, column_pos=10) @@ -167,3 +170,147 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 assert torch.equal(cert_and_correct, torch.tensor([True, False, False])) except ARTTestException as e: art_warning(e) + + +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +def test_equivalence(fix_get_cifar10_data): + import torch + + class MadrylabImplementations: + """ + Code adapted from the implementation in https://github.com/MadryLab/smoothed-vit + to check against our own functionality. + + Original License: + + MIT License + + Copyright (c) 2021 Madry Lab + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + """ + + def __init__(self): + pass + + @classmethod + def token_dropper(cls, x, mask): + """ + The implementation of dropping tokens has been done slightly differently in this tool. 
+ Here we check that it is equivalent to the original implementation + """ + class MaskProcessor(torch.nn.Module): + def __init__(self, patch_size=16): + super().__init__() + self.avg_pool = torch.nn.AvgPool2d(patch_size) + + def forward(self, ones_mask): + B = ones_mask.shape[0] + ones_mask = ones_mask[0].unsqueeze(0) # take the first mask + ones_mask = self.avg_pool(ones_mask)[0] + ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 + ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + ones_mask = ones_mask.expand(B, -1) + return ones_mask + + mask_processor = MaskProcessor() + patch_mask = mask_processor(mask) + + # x = self.pos_drop(x) # B, N, C + if patch_mask is not None: + # patch_mask is B, K + B, N, C = x.shape + if len(patch_mask.shape) == 1: # not a separate one per batch + x = x[:, patch_mask] + else: + patch_mask = patch_mask.unsqueeze(-1).expand(-1, -1, C) + x = torch.gather(x, 1, patch_mask) + return x + + @classmethod + def embedder(cls, x, pos_embed, cls_token): + """ + NB, original code used the pos embed from the divit rather than vit + (which we pull from our model) which we use here. + """ + x = torch.cat((cls_token.expand(x.shape[0], -1, -1), x), dim=1) + return x + pos_embed + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + """ + This is a copy of the function in ArtViT.forward_features + except we also perform an equivalence assertion compared to the implementation + in https://github.com/MadryLab/smoothed-vit (see MadrylabImplementations class above) + + The forward pass of the ViT. + + :param x: Input data. + :return: The input processed by the ViT backbone + """ + import copy + + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True + + if ablated_input: + x, ablation_mask = x[:, :self.in_chans], x[:, self.in_chans:self.in_chans + 1] + + x = self.patch_embed(x) + + madry_embed = MadrylabImplementations.embedder(copy.copy(x), self.pos_embed, self.cls_token) + x = self._pos_embed(x) + assert torch.equal(madry_embed, x) + + # pass the x into the token dropping code from ... + madry_dropped = MadrylabImplementations.token_dropper(copy.copy(x), ablation_mask) + + if self.to_drop_tokens and ablated_input: + ones = self.ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + x = self.drop_tokens(x, indexes) + + print(torch.equal(madry_dropped, x)) + assert torch.equal(madry_dropped, x) + + x = self.norm_pre(x) + x = self.blocks(x) + + return self.norm(x) + + # Replace the forward_features with the forward_features code with checks. 
+ ArtViT.forward_features = forward_features + + art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True) + + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] + + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) + art_model.fit(cifar_data, cifar_labels, nb_epochs=2, update_batchnorm=True, scheduler=scheduler) From a69b3e5d6b3e1288f6955331e09ddaa6de3f9ab2 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 29 May 2023 15:16:53 +0100 Subject: [PATCH 15/55] specify certification size and initial refactor to handle last batch in certification Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 29 ++++++++++++++----- dev.py | 13 ++++----- notebooks/smoothed_vision_transformers.ipynb | 2 +- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 96bc291951..bfb6c1dd7c 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -629,20 +629,28 @@ def fit( # pylint: disable=W0221 scheduler.step() def eval_and_certify( - self, x: np.ndarray, y: np.ndarray, batch_size: int = 128 + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + drop_last: bool = False, + verbose: bool = True, ) -> Tuple["torch.Tensor", "torch.Tensor"]: """ Evaluates the ViT's normal and certified performance over the supplied data. :param x: Evaluation data. :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. :param batch_size: batch size when evaluating. 
+ :param drop_last: + :param verbose: If to display the progress bar :return: The accuracy and certified accuracy over the dataset """ self.model.eval() - drop_last = True - verbose = True y = check_and_transform_label_format(y, nb_classes=self.nb_classes) # Apply preprocessing @@ -659,12 +667,17 @@ def eval_and_certify( pbar = tqdm(range(num_batch), disable=not verbose) accuracy = [] cert_acc = [] + with torch.no_grad(): for m in pbar: - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( - self._device - ) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) + if m == num_batch and not drop_last: + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size :])).to(self._device) + o_batch = torch.from_numpy(y_preprocessed[m * batch_size :]).to(self._device) + else: + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( + self._device + ) + o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) predictions = [] pred_counts = torch.zeros((batch_size, self.nb_classes)).to(self._device) for pos in range(i_batch.shape[-1]): @@ -676,7 +689,7 @@ def eval_and_certify( predictions.append(model_outputs) _, cert_and_correct, top_predicted_class = self.ablator.certify( - pred_counts, size_to_certify=4, label=o_batch + pred_counts, size_to_certify=size_to_certify, label=o_batch ) cert_acc.append(torch.sum(cert_and_correct) / batch_size) acc = torch.sum(top_predicted_class == o_batch) / batch_size diff --git a/dev.py b/dev.py index 2894682d7c..007df78000 100644 --- a/dev.py +++ b/dev.py @@ -33,7 +33,6 @@ def get_cifar_data(): (x_train, y_train), (x_test, y_test) = get_cifar_data() -x_test = torch.from_numpy(x_test) art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', loss=torch.nn.CrossEntropyLoss(), @@ -47,12 +46,12 @@ def get_cifar_data(): ) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -art_model.fit(x_train, y_train, - nb_epochs=30, - update_batchnorm=True, - scheduler=scheduler, - transform=transforms.Compose([transforms.RandomHorizontalFlip()])) +# art_model.fit(x_train, y_train, +# nb_epochs=30, +# update_batchnorm=True, +# scheduler=scheduler, +# transform=transforms.Compose([transforms.RandomHorizontalFlip()])) # torch.save(art_model.model.state_dict(), 'trained.pt') # art_model.model.load_state_dict(torch.load('trained.pt')) -art_model.eval_and_certify(x_train, y_train) +art_model.eval_and_certify(x_test, y_test, size_to_certify=4) diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb index 22c896f161..91c0d8c874 100644 --- a/notebooks/smoothed_vision_transformers.ipynb +++ b/notebooks/smoothed_vision_transformers.ipynb @@ -964,7 +964,7 @@ "source": [ "# Perform certification\n", "art_model.model.load_state_dict(torch.load('trained.pt'))\n", - "acc, cert_acc = art_model.eval_and_certify(x_test, y_test)" + "acc, cert_acc = art_model.eval_and_certify(x_test, y_test, size_to_certify=4)" ] } ], From 3daab1261b90391eb592b80f680772e880423a5c Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 29 May 2023 15:51:57 +0000 Subject: [PATCH 16/55] updated metrics and notebook Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/__init__.py | 5 +- .../smoothed_vision_transformers/pytorch.py | 47 +++++++++---------- notebooks/smoothed_vision_transformers.ipynb | 10 ++-- 3 files changed, 32 
insertions(+), 30 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers/__init__.py index fd2b959474..5791128b5e 100644 --- a/art/estimators/certification/smoothed_vision_transformers/__init__.py +++ b/art/estimators/certification/smoothed_vision_transformers/__init__.py @@ -1 +1,4 @@ -from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT \ No newline at end of file +""" +Smoothed ViT estimators. +""" +from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index bfb6c1dd7c..1ffdb02e0d 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -573,8 +573,10 @@ def fit( # pylint: disable=W0221 for _ in tqdm(range(nb_epochs)): # Shuffle the examples random.shuffle(ind) + epoch_acc = [] epoch_loss = [] + epoch_batch_sizes = [] pbar = tqdm(range(num_batch), disable=not verbose) @@ -605,8 +607,6 @@ def fit( # pylint: disable=W0221 loss = self.loss(model_outputs, o_batch) acc = self.get_accuracy(preds=model_outputs, labels=o_batch) - epoch_acc.append(acc) - epoch_loss.append(loss) # Do training if self._use_amp: # pragma: no cover @@ -620,9 +620,14 @@ def fit( # pylint: disable=W0221 self.optimizer.step() + epoch_acc.append(acc) + epoch_loss.append(loss.cpu().detach().numpy()) + epoch_batch_sizes.append(len(i_batch)) + if verbose: pbar.set_description( - f"Loss {torch.mean(torch.stack(epoch_loss)):.2f}" f" Acc {np.mean(epoch_acc):.2f}" + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " ) if scheduler is not None: @@ -634,7 +639,6 @@ def eval_and_certify( y: np.ndarray, size_to_certify: int, batch_size: int = 128, - drop_last: bool = False, verbose: bool = True, ) -> Tuple["torch.Tensor", "torch.Tensor"]: """ @@ -645,7 +649,6 @@ def eval_and_certify( :param size_to_certify: The size of the patch to certify against. If not provided will default to the ablation size. :param batch_size: batch size when evaluating. 
- :param drop_last: :param verbose: If to display the progress bar :return: The accuracy and certified accuracy over the dataset """ @@ -659,18 +662,15 @@ def eval_and_certify( # Check label shape y_preprocessed = self.reduce_labels(y_preprocessed) - num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) pbar = tqdm(range(num_batch), disable=not verbose) - accuracy = [] - cert_acc = [] + accuracy = torch.tensor(0.0).to(self._device) + cert_sum = torch.tensor(0.0).to(self._device) + n_samples = 0 with torch.no_grad(): for m in pbar: - if m == num_batch and not drop_last: + if m == (num_batch - 1): i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size :])).to(self._device) o_batch = torch.from_numpy(y_preprocessed[m * batch_size :]).to(self._device) else: @@ -678,33 +678,32 @@ def eval_and_certify( self._device ) o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) + predictions = [] - pred_counts = torch.zeros((batch_size, self.nb_classes)).to(self._device) + pred_counts = torch.zeros((len(i_batch), self.nb_classes)).to(self._device) for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction model_outputs = self.model(ablated_batch) - pred_counts[np.arange(0, batch_size), model_outputs.argmax(dim=-1)] += 1 + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1)] += 1 predictions.append(model_outputs) _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch ) - cert_acc.append(torch.sum(cert_and_correct) / batch_size) - acc = torch.sum(top_predicted_class == o_batch) / batch_size - accuracy.append(acc) + cert_sum += torch.sum(cert_and_correct) + accuracy += torch.sum(top_predicted_class == o_batch) + n_samples += len(cert_and_correct) - pbar.set_description( - f"Normal Acc {torch.mean(torch.stack(accuracy)):.2f} " - f"Cert Acc {torch.mean(torch.stack(cert_acc)):.2f}" - ) - return torch.mean(torch.stack(accuracy)), torch.mean(torch.stack(cert_acc)) + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") + + return (accuracy / n_samples), (cert_sum / n_samples) @staticmethod def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: """ - Helper function to print out the accuracy during training. + Helper function to get the accuracy during training. :param preds: model predictions. :param labels: ground truth labels (not one hot). diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb index 91c0d8c874..c38132fbe1 100644 --- a/notebooks/smoothed_vision_transformers.ipynb +++ b/notebooks/smoothed_vision_transformers.ipynb @@ -521,7 +521,7 @@ " optimizer=optimizer, # the optimizer to use: note! this is not initialised here we just supply the class!\n", " input_shape=(3, 32, 32), # the input shape of the data: Note! 
that if this is a different shape to what the ViT expects it will be re-scaled\n", " nb_classes=10,\n", - " ablation_size=4,\n", + " ablation_size=4, # Size of the retained column\n", " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", " load_pretrained=True) # if to load pre-trained weights for the ViT" ] @@ -852,7 +852,7 @@ " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", " nb_classes=10,\n", - " ablation_size=4,\n", + " ablation_size=4, # Size of the retained column\n", " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", " load_pretrained=True) # if to load pre-trained weights for the ViT" ] @@ -883,7 +883,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 6, @@ -947,7 +947,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "046b8168", "metadata": { "scrolled": true @@ -957,7 +957,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Normal Acc 0.89 Cert Acc 0.68: 49%|██████████████████████████████████████████████████████████████████████▏ | 38/78 [00:34<00:34, 1.15it/s]" + "Normal Acc 0.891 Cert Acc 0.684: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [01:09<00:00, 1.14it/s]\n" ] } ], From 4bfbeb689715dacb699f505768909f42ddaafeae Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 30 May 2023 10:02:55 +0100 Subject: [PATCH 17/55] remove dev file Signed-off-by: GiulioZizzo --- dev.py | 57 --------------------------------------------------------- 1 file changed, 57 deletions(-) delete mode 100644 dev.py diff --git a/dev.py b/dev.py deleted file mode 100644 index 007df78000..0000000000 --- a/dev.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -import ssl -ssl._create_default_https_context = ssl._create_unverified_context -from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT -import numpy as np -from torchvision import datasets -from torchvision import transforms - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -def get_cifar_data(): - """ - Get CIFAR-10 data. - :return: cifar train/test data. 
- """ - train_set = datasets.CIFAR10('./data', train=True, download=True) - test_set = datasets.CIFAR10('./data', train=False, download=True) - - x_train = train_set.data.astype(np.float32) - y_train = np.asarray(train_set.targets) - - x_test = test_set.data.astype(np.float32) - y_test = np.asarray(test_set.targets) - - x_train = np.moveaxis(x_train, [3], [1]) - x_test = np.moveaxis(x_test, [3], [1]) - - x_train = x_train / 255.0 - x_test = x_test / 255.0 - - return (x_train, y_train), (x_test, y_test) - - -(x_train, y_train), (x_test, y_test) = get_cifar_data() - -art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - replace_last_layer=True, - load_pretrained=True, - ) - -scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -# art_model.fit(x_train, y_train, -# nb_epochs=30, -# update_batchnorm=True, -# scheduler=scheduler, -# transform=transforms.Compose([transforms.RandomHorizontalFlip()])) - -# torch.save(art_model.model.state_dict(), 'trained.pt') -# art_model.model.load_state_dict(torch.load('trained.pt')) -art_model.eval_and_certify(x_test, y_test, size_to_certify=4) From caa221e901b45153cadb918aee25d9357e7bcbc6 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 30 May 2023 11:26:38 +0100 Subject: [PATCH 18/55] black formatting changes, updating requirements_test.txt Signed-off-by: GiulioZizzo --- requirements_test.txt | 2 +- .../certification/test_smooth_vit.py | 106 ++++++++++-------- 2 files changed, 62 insertions(+), 46 deletions(-) diff --git a/requirements_test.txt b/requirements_test.txt index 87add272e9..91a3828ae3 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -35,7 +35,7 @@ torchaudio==0.13.1+cpu torchvision==0.14.1+cpu # PyTorch image transformers -timm @ git+https://github.com/huggingface/pytorch-image-models.git@9fcc01930aae865ec9ef8aae8849ca2ba241f816 +timm@git+https://github.com/huggingface/pytorch-image-models.git@9fcc01930aae865ec9ef8aae8849ca2ba241f816 catboost==1.1.1 GPy==1.10.0 diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 42897414e1..a3d76aae28 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -66,17 +66,21 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): Check that the ablation is being performed correctly """ import torch - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + try: cifar_data = fix_get_cifar10_data[0] cifar_labels = fix_get_cifar10_data[1] - col_ablator = ColumnAblator(ablation_size=4, - channels_first=True, - to_reshape=False, # do not upsample initially - original_shape=(3, 32, 32), - output_shape=(3, 224, 224)) + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=False, # do not upsample initially + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) cifar_data = torch.from_numpy(cifar_data).to(device) # check that the ablation functioned when in the middle of the image @@ -96,26 +100,28 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): assert torch.sum(ablated[:, :, :, :2]) > 0 
# check that upsampling works as expected - col_ablator = ColumnAblator(ablation_size=4, - channels_first=True, - to_reshape=True, - original_shape=(3, 32, 32), - output_shape=(3, 224, 224)) + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) ablated = col_ablator.forward(cifar_data, column_pos=10) assert ablated.shape[1] == 4 - assert torch.sum(ablated[:, :, :, :10*7]) == 0 - assert torch.sum(ablated[:, :, :, 10*7:14*7]) > 0 - assert torch.sum(ablated[:, :, :, 14*7:]) == 0 + assert torch.sum(ablated[:, :, :, : 10 * 7]) == 0 + assert torch.sum(ablated[:, :, :, 10 * 7 : 14 * 7]) > 0 + assert torch.sum(ablated[:, :, :, 14 * 7 :]) == 0 # check that the ablation wraps when on the edge of the image ablated = col_ablator.forward(cifar_data, column_pos=30) assert ablated.shape[1] == 4 - assert torch.sum(ablated[:, :, :, 30*7:]) > 0 - assert torch.sum(ablated[:, :, :, 2*7:30*7]) == 0 - assert torch.sum(ablated[:, :, :, :2*7]) > 0 + assert torch.sum(ablated[:, :, :, 30 * 7 :]) > 0 + assert torch.sum(ablated[:, :, :, 2 * 7 : 30 * 7]) == 0 + assert torch.sum(ablated[:, :, :, : 2 * 7]) > 0 except ARTTestException as e: art_warning(e) @@ -127,19 +133,22 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) Check that the training loop for pytorch does not result in errors """ import torch + try: cifar_data = fix_get_cifar10_data[0][:50] cifar_labels = fix_get_cifar10_data[1][:50] - art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=True, - replace_last_layer=True) + art_model = PyTorchSmoothedViT( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + ) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) art_model.fit(cifar_data, cifar_labels, nb_epochs=2, update_batchnorm=True, scheduler=scheduler) @@ -157,15 +166,19 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 import torch try: - col_ablator = ColumnAblator(ablation_size=4, - channels_first=True, - to_reshape=True, # do not upsample initially - original_shape=(3, 32, 32), - output_shape=(3, 224, 224)) + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, # do not upsample initially + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) pred_counts = torch.from_numpy(np.asarray([[20, 5, 1], [10, 5, 1], [1, 16, 1]])) - cert, cert_and_correct, top_predicted_class = col_ablator.certify(pred_counts=pred_counts, - size_to_certify=4, - label=0,) + cert, cert_and_correct, top_predicted_class = col_ablator.certify( + pred_counts=pred_counts, + size_to_certify=4, + label=0, + ) assert torch.equal(cert, torch.tensor([True, False, True])) assert torch.equal(cert_and_correct, torch.tensor([True, False, False])) except ARTTestException as e: @@ -216,6 +229,7 @@ def token_dropper(cls, x, mask): The implementation of dropping tokens has been done slightly differently in this tool. 
Here we check that it is equivalent to the original implementation """ + class MaskProcessor(torch.nn.Module): def __init__(self, patch_size=16): super().__init__() @@ -237,7 +251,7 @@ def forward(self, ones_mask): if patch_mask is not None: # patch_mask is B, K B, N, C = x.shape - if len(patch_mask.shape) == 1: # not a separate one per batch + if len(patch_mask.shape) == 1: # not a separate one per batch x = x[:, patch_mask] else: patch_mask = patch_mask.unsqueeze(-1).expand(-1, -1, C) @@ -271,7 +285,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: ablated_input = True if ablated_input: - x, ablation_mask = x[:, :self.in_chans], x[:, self.in_chans:self.in_chans + 1] + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] x = self.patch_embed(x) @@ -299,15 +313,17 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: # Replace the forward_features with the forward_features code with checks. ArtViT.forward_features = forward_features - art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=False, - replace_last_layer=True) + art_model = PyTorchSmoothedViT( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + ) cifar_data = fix_get_cifar10_data[0][:50] cifar_labels = fix_get_cifar10_data[1][:50] From 9508d92dfdfdc86d3e128aebd3bf7e756e976960 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 31 May 2023 08:52:39 +0100 Subject: [PATCH 19/55] adding checks for timm installation before importing smoothed vits Signed-off-by: GiulioZizzo --- setup.py | 1 + tests/estimators/certification/test_smooth_vit.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index eff1b154b5..e6c0cfc077 100644 --- a/setup.py +++ b/setup.py @@ -112,6 +112,7 @@ def get_version(rel_path): "requests", "sortedcontainers", "numba", + # "timm" to be added as a dependency after the next timm release ], }, classifiers=[ diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index a3d76aae28..cdc8546d61 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -72,7 +72,6 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): try: cifar_data = fix_get_cifar10_data[0] - cifar_labels = fix_get_cifar10_data[1] col_ablator = ColumnAblator( ablation_size=4, @@ -240,7 +239,7 @@ def forward(self, ones_mask): ones_mask = ones_mask[0].unsqueeze(0) # take the first mask ones_mask = self.avg_pool(ones_mask)[0] ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 - ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) ones_mask = ones_mask.expand(B, -1) return ones_mask From 2de94b7fa32fe33555e2a55fef0b82b821814efb Mon Sep 17 00:00:00 2001 From: GiulioZizzo <41791963+GiulioZizzo@users.noreply.github.com> Date: Fri, 16 Jun 2023 08:49:34 +0100 Subject: [PATCH 20/55] Updates from review feedback Co-authored-by: Beat Buesser <49047826+beat-buesser@users.noreply.github.com> Signed-off-by: GiulioZizzo --- 
.../smoothed_vision_transformers/pytorch.py | 14 +++++++------- .../smoothed_vision_transformers/smooth_vit.py | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 1ffdb02e0d..4a5eb1e6f7 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -28,11 +28,10 @@ import logging from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random -import torch - -from timm.models.vision_transformer import VisionTransformer import numpy as np +from timm.models.vision_transformer import VisionTransformer +import torch from tqdm import tqdm from art.estimators.classification.pytorch import PyTorchClassifier @@ -49,7 +48,8 @@ class PatchEmbed(torch.nn.Module): - """Image to Patch Embedding + """ + Image to Patch Embedding Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit @@ -75,8 +75,7 @@ class PatchEmbed(torch.nn.Module): AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - + SOFTWARE """ def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): @@ -381,7 +380,6 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: Return the supported model names to the user. :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. - Can be time-consuming. :return: A list of compatible models """ import timm @@ -491,6 +489,7 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: """ Method to update the batchnorm of a ViT on small datasets + :param x: Training data. :param batch_size: Size of batches. :param nb_epochs: How many times to forward pass over the input data @@ -525,6 +524,7 @@ def fit( # pylint: disable=W0221 ) -> None: """ Fit the classifier on the training set `(x, y)`. + :param x: Training data. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of shape (nb_samples,). 
diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py index f9a660199e..2a0f5bc564 100644 --- a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py +++ b/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py @@ -38,6 +38,7 @@ class UpSampler(torch.nn.Module): def __init__(self, input_size: int, final_size: int) -> None: """ Creates an upsampler to make the supplied data match the pre-trained ViT format + :param input_size: Size of the current input data :param final_size: Desired final size """ From 72208762313418055099b046d6110c0e4d80d95a Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 16 Jun 2023 09:26:02 +0100 Subject: [PATCH 21/55] pr review edits Signed-off-by: GiulioZizzo --- .../smoothed_vision_transformers/pytorch.py | 12 +++++++----- notebooks/README.md | 3 +++ requirements_test.txt | 2 +- setup.py | 2 +- tests/estimators/certification/test_smooth_vit.py | 3 +-- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers/pytorch.py index 4a5eb1e6f7..c4ba0ed050 100644 --- a/art/estimators/certification/smoothed_vision_transformers/pytorch.py +++ b/art/estimators/certification/smoothed_vision_transformers/pytorch.py @@ -44,6 +44,7 @@ from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor +logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -145,7 +146,8 @@ class ArtViT(VisionTransformer): def __init__(self, **kwargs): """ Create a ArtViT instance - :param kwargs: keyword arguments required to create the mask embedder. + :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class + Must contain ... """ self.to_drop_tokens = kwargs["drop_tokens"] @@ -333,7 +335,7 @@ def __init__( if model.default_cfg["input_size"] != input_shape: if verbose: - print( + logger.warning( f"ViT expects input shape of {model.default_cfg['input_size']}, " f"but {input_shape} specified as the input shape. " f"The input will be rescaled to {model.default_cfg['input_size']}" @@ -360,7 +362,7 @@ def __init__( self.ablation_size = (ablation_size,) if verbose: - print(self.model) + logger.info(self.model) self.ablator = ColumnAblator( ablation_size=ablation_size, @@ -439,7 +441,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: models = timm.list_models("vit_*") for model in models: - print(f"Testing {model} creation") + logger.info(f"Testing {model} creation") try: _ = PyTorchSmoothedViT( model=model, @@ -458,7 +460,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: unsupported.append(model) if supported != supported_models: - print( + logger.warning( "Difference between the generated and fixed model list. Although not necessarily " "an error, this may point to the timm library being updated." ) diff --git a/notebooks/README.md b/notebooks/README.md index 7ab184e397..95806cbf65 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -296,6 +296,9 @@ demonstrates using interval bound propagation for certification of neural networ

+[smoothed_vision_transformers.ipynb](smoothed_vision_transformers.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/smoothed_vision_transformers.ipynb)] +Demonstrates training a neural network using smoothed vision transformers for certified performance against patch attacks. + ## MNIST [fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb](fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb)] diff --git a/requirements_test.txt b/requirements_test.txt index 91a3828ae3..b76cb982e9 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -35,7 +35,7 @@ torchaudio==0.13.1+cpu torchvision==0.14.1+cpu # PyTorch image transformers -timm@git+https://github.com/huggingface/pytorch-image-models.git@9fcc01930aae865ec9ef8aae8849ca2ba241f816 +timm==0.9.2 catboost==1.1.1 GPy==1.10.0 diff --git a/setup.py b/setup.py index e6c0cfc077..476d53b886 100644 --- a/setup.py +++ b/setup.py @@ -112,7 +112,7 @@ def get_version(rel_path): "requests", "sortedcontainers", "numba", - # "timm" to be added as a dependency after the next timm release + "timm", ], }, classifiers=[ diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index cdc8546d61..1d8a13c8ae 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -292,7 +292,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self._pos_embed(x) assert torch.equal(madry_embed, x) - # pass the x into the token dropping code from ... 
+ # pass the x into the token dropping code madry_dropped = MadrylabImplementations.token_dropper(copy.copy(x), ablation_mask) if self.to_drop_tokens and ablated_input: @@ -301,7 +301,6 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) x = self.drop_tokens(x, indexes) - print(torch.equal(madry_dropped, x)) assert torch.equal(madry_dropped, x) x = self.norm_pre(x) From 5360d89d2dc63e5cb5994c4d423451fbf14623a3 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 27 Jun 2023 08:47:01 +0100 Subject: [PATCH 22/55] move vit functionality into derandomised smoothing toolset Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/pytorch.py | 195 +++++- .../vision_transformers/__init__.py | 0 .../vision_transformers/pytorch.py | 559 ++++++++++++++++++ .../vision_transformers/smooth_vit.py | 154 +++++ .../vision_transformers/vit.py | 170 ++++++ .../__init__.py | 0 .../pytorch.py | 0 .../smooth_vit.py | 0 dev.py | 56 ++ 9 files changed, 1127 insertions(+), 7 deletions(-) create mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/__init__.py create mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py create mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py create mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py rename art/estimators/certification/{smoothed_vision_transformers => smoothed_vision_transformers_old}/__init__.py (100%) rename art/estimators/certification/{smoothed_vision_transformers => smoothed_vision_transformers_old}/pytorch.py (100%) rename art/estimators/certification/{smoothed_vision_transformers => smoothed_vision_transformers_old}/smooth_vit.py (100%) create mode 100644 dev.py diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 4a184b3666..422ca98d74 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -22,7 +22,7 @@ """ from __future__ import absolute_import, division, print_function, unicode_literals - +import importlib import logging from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random @@ -32,6 +32,7 @@ from art.config import ART_NUMPY_DTYPE from art.estimators.classification.pytorch import PyTorchClassifier +from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchSmoothedViT from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.utils import check_and_transform_label_format @@ -46,7 +47,7 @@ logger = logging.getLogger(__name__) -class PyTorchDeRandomizedSmoothing(DeRandomizedSmoothingMixin, PyTorchClassifier): +class PyTorchDeRandomizedSmoothingCNN(DeRandomizedSmoothingMixin, PyTorchClassifier): """ Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced in Levine et al. (2020). 
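For orientation, the split introduced by this patch is meant to be driven through a single user-facing class: PyTorchDeRandomizedSmoothing routes a timm ViT (or a timm model name) to the new smoothed-ViT code path, and a plain torch.nn.Module to the CNN-based (De)Randomized Smoothing path renamed above. The following is a minimal usage sketch of the ViT path, mirroring the dev.py script added later in this patch; the optimizer settings are illustrative only and the CIFAR-10 arrays (x_train, y_train, x_test, y_test) are assumed to be loaded elsewhere.

import torch
from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing

# A timm ViT name (or VisionTransformer instance) selects the smoothed-ViT mode;
# a plain torch.nn.Module would instead select the CNN-based smoothing mode.
art_model = PyTorchDeRandomizedSmoothing(
    model="vit_small_patch16_224",
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.SGD,
    optimizer_params={"lr": 0.01},
    input_shape=(3, 32, 32),
    nb_classes=10,
    ablation_size=4,
    replace_last_layer=True,
    load_pretrained=True,
)

# Column-ablated training followed by certified evaluation against 4x4 patches.
art_model.fit(x_train, y_train, nb_epochs=30, update_batchnorm=True)
art_model.eval_and_certify(x_test, y_test, size_to_certify=4)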
@@ -148,6 +149,150 @@ def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epoc x = x.astype(ART_NUMPY_DTYPE) return PyTorchClassifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) + def fit_old( # pylint: disable=W0221 + self, + x: np.ndarray, + y: np.ndarray, + batch_size: int = 128, + nb_epochs: int = 10, + training_mode: bool = True, + drop_last: bool = False, + scheduler: Optional[Any] = None, + update_batchnorm: bool = True, + batchnorm_update_epochs: int = 1, + transform: Optional["torchvision.transforms.transforms.Compose"] = None, + verbose: bool = True, + **kwargs, + ) -> None: + """ + Fit the classifier on the training set `(x, y)`. + + :param x: Training data. + :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of + shape (nb_samples,). + :param batch_size: Size of batches. + :param nb_epochs: Number of epochs to use for training. + :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by + the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then + the last batch will be smaller. (default: ``False``) + :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param update_batchnorm: ViT Specific Arg. If to run the training data through the model to update any batch norm statistics prior + to training. Useful on small datasets when using pre-trained ViTs. + :param batchnorm_update_epochs: ViT Specific Arg. How many times to forward pass over the training data + to pre-adjust the batchnorm statistics. + :param transform: ViT Specific Arg. Torchvision compose of relevant augmentation transformations to apply. + :param verbose: if to display training progress bars + :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch + and providing it takes no effect. 
+ """ + import torch + + # Check if we have a VIT + + # Set model mode + self._model.train(mode=training_mode) + + if self._optimizer is None: # pragma: no cover + raise ValueError("An optimizer is needed to train the model, but none for provided.") + + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + if update_batchnorm: # VIT specific + self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = len(x_preprocessed) / float(batch_size) + if drop_last: + num_batch = int(np.floor(num_batch)) + else: + num_batch = int(np.ceil(num_batch)) + ind = np.arange(len(x_preprocessed)) + + # Start training + for _ in tqdm(range(nb_epochs)): + # Shuffle the examples + random.shuffle(ind) + + epoch_acc = [] + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + + # Train for one epoch + for m in pbar: + i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) + i_batch = self.ablator.forward(i_batch) + + if transform is not None: # VIT specific + i_batch = transform(i_batch) + + i_batch = torch.from_numpy(i_batch).to(self._device) + o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) + + # Zero the parameter gradients + self._optimizer.zero_grad() + + # Perform prediction + try: + model_outputs = self.model(i_batch) + except ValueError as err: + if "Expected more than 1 value per channel when training" in str(err): + logger.exception( + "Try dropping the last incomplete batch by setting drop_last=True in " + "method PyTorchClassifier.fit." + ) + raise err + + loss = self.loss(model_outputs, o_batch) + acc = self.get_accuracy(preds=model_outputs, labels=o_batch) + + # Do training + if self._use_amp: # pragma: no cover + from apex import amp # pylint: disable=E0611 + + with amp.scale_loss(loss, self._optimizer) as scaled_loss: + scaled_loss.backward() + + else: + loss.backward() + + self.optimizer.step() + + epoch_acc.append(acc) + epoch_loss.append(loss.cpu().detach().numpy()) + epoch_batch_sizes.append(len(i_batch)) + + if verbose: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) + + if scheduler is not None: + scheduler.step() + + +class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT): + def __init__(self, model: Union[str, "VisionTransformer", "torch.nn.Module"], **kwargs): + import torch + + if isinstance(model, torch.nn.Module): + PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) + self.mode = "CNN" + if importlib.util.find_spec("timm") is not None: + from timm.models.vision_transformer import VisionTransformer + + if isinstance(model, VisionTransformer) or isinstance(model, str): + PyTorchSmoothedViT.__init__(self, model, **kwargs) + self.mode = "ViT" + def fit( # pylint: disable=W0221 self, x: np.ndarray, @@ -157,10 +302,15 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional[Any] = None, + update_batchnorm: bool = True, + batchnorm_update_epochs: int = 1, + transform: Optional["torchvision.transforms.transforms.Compose"] = None, + verbose: bool = True, **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. 
+ :param x: Training data. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of shape (nb_samples,). @@ -171,6 +321,13 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param update_batchnorm: ViT specific argument. + If to run the training data through the model to update any batch norm statistics prior + to training. Useful on small datasets when using pre-trained ViTs. + :param batchnorm_update_epochs: ViT specific argument. How many times to forward pass over the training data + to pre-adjust the batchnorm statistics. + :param transform: ViT specific argument. Torchvision compose of relevant augmentation transformations to apply. + :param verbose: if to display training progress bars :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ @@ -187,6 +344,9 @@ def fit( # pylint: disable=W0221 # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + if update_batchnorm and self.mode == "ViT": # VIT specific + self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) + # Check label shape y_preprocessed = self.reduce_labels(y_preprocessed) @@ -202,11 +362,20 @@ def fit( # pylint: disable=W0221 # Shuffle the examples random.shuffle(ind) + epoch_acc = [] + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + # Train for one epoch - for m in range(num_batch): + for m in pbar: i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) i_batch = self.ablator.forward(i_batch) + if transform is not None and self.mode == "ViT": # VIT specific + i_batch = transform(i_batch) + i_batch = torch.from_numpy(i_batch).to(self._device) o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) @@ -215,7 +384,7 @@ def fit( # pylint: disable=W0221 # Perform prediction try: - model_outputs = self._model(i_batch) + model_outputs = self.model(i_batch) except ValueError as err: if "Expected more than 1 value per channel when training" in str(err): logger.exception( @@ -224,8 +393,8 @@ def fit( # pylint: disable=W0221 ) raise err - # Form the loss function - loss = self._loss(model_outputs[-1], o_batch) + loss = self.loss(model_outputs, o_batch) + acc = self.get_accuracy(preds=model_outputs, labels=o_batch) # Do training if self._use_amp: # pragma: no cover @@ -237,7 +406,19 @@ def fit( # pylint: disable=W0221 else: loss.backward() - self._optimizer.step() + self.optimizer.step() + + epoch_acc.append(acc) + epoch_loss.append(loss.cpu().detach().numpy()) + epoch_batch_sizes.append(len(i_batch)) + + if verbose: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) if scheduler is not None: scheduler.step() + + diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/__init__.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py 
b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py new file mode 100644 index 0000000000..1675dfa2c2 --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -0,0 +1,559 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf +""" +from __future__ import absolute_import, division, print_function, unicode_literals + +import logging +from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING +import random + +import numpy as np +from tqdm import tqdm + +from art.estimators.classification.pytorch import PyTorchClassifier +from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator +from art.utils import check_and_transform_label_format + +if TYPE_CHECKING: + import torchvision + from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE + from art.defences.preprocessor import Preprocessor + from art.defences.postprocessor import Postprocessor + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class PyTorchSmoothedViT(PyTorchClassifier): + """ + Implementation of Certified Patch Robustness via Smoothed Vision Transformers + + | Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + + | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf + """ + + def __init__( + self, + model: Union["VisionTransformer", str], + loss: "torch.nn.modules.loss._Loss", + input_shape: Tuple[int, ...], + nb_classes: int, + ablation_size: int, + replace_last_layer: bool, + drop_tokens: bool = True, + load_pretrained: bool = True, + optimizer: Union[type, "torch.optim.Optimizer", None] = None, + optimizer_params: Optional[dict] = None, + channels_first: bool = True, + clip_values: Optional["CLIP_VALUES_TYPE"] = None, + preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, + postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + 
preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), + device_type: str = "gpu", + verbose: bool = True, + ): + """ + Create a smoothed ViT classifier. + + :param model: Either a string specifying which ViT architecture to load, or a vision transformer already + created with the Pytorch Image Models (timm) library. + :param loss: The loss function for which to compute gradients for training. The target label must be raw + categorical, i.e. not converted to one-hot encoding. + :param input_shape: The shape of one input instance. + :param nb_classes: The number of classes of the model. + :param ablation_size: The size of the data portion to retain after ablation. + :param replace_last_layer: If to replace the last layer of the ViT with a fresh layer matching the number + of classes for the dataset to be examined. Needed if going from the pre-trained + imagenet models to fine-tune on a dataset like CIFAR. + :param drop_tokens: If to drop the fully ablated tokens in the ViT + :param load_pretrained: If to load a pretrained model matching the ViT name. Will only affect the ViT if a + string name is passed to model rather than a ViT directly. + :param optimizer: The optimizer used to train the classifier. + :param channels_first: Set channels first or last. + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. + :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. + :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be + used for data preprocessing. The first value will be subtracted from the input. The input will then + be divided by the second one. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. + """ + import timm + import torch + from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ArtViT + + # temporarily assign the original method to tmp_func + tmp_func = timm.models.vision_transformer._create_vision_transformer + + # overrride with ART's ViT creation function + timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer + if isinstance(model, str): + model = timm.create_model( + model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type + ) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + if isinstance(optimizer, type): + if optimizer_params is not None: + optimizer = optimizer(model.parameters(), **optimizer_params) + else: + raise ValueError("If providing an optimiser please also supply its parameters") + + elif isinstance(model, VisionTransformer): + pretrained_cfg = model.pretrained_cfg + supplied_state_dict = model.state_dict() + supported_models = self.get_models() + if pretrained_cfg["architecture"] not in supported_models: + raise ValueError( + "Architecture not supported. Use PyTorchSmoothedViT.get_models() " + "to get the supported model architectures." 
+ ) + model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type) + model.load_state_dict(supplied_state_dict) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + + if optimizer is not None: + if not isinstance(optimizer, torch.optim.Optimizer): + raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") + + converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] + opt_state_dict = optimizer.state_dict() + if isinstance(optimizer, torch.optim.Adam): + logging.info("Converting Adam Optimiser") + converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) + elif isinstance(optimizer, torch.optim.SGD): + logging.info("Converting SGD Optimiser") + converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) + else: + raise ValueError("Optimiser not supported for conversion") + converted_optimizer.load_state_dict(opt_state_dict) + + self.to_reshape = False + if not isinstance(model, ArtViT): + raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") + + if model.default_cfg["input_size"][0] != input_shape[0]: + raise ValueError( + f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f" but {input_shape[0]} channels were provided." + ) + + if model.default_cfg["input_size"] != input_shape: + if verbose: + logger.warning( + f"ViT expects input shape of {model.default_cfg['input_size']}, " + f"but {input_shape} specified as the input shape. " + f"The input will be rescaled to {model.default_cfg['input_size']}" + ) + self.to_reshape = True + + if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ) + else: + raise ValueError("Error occurred in optimizer creation") + + self.ablation_size = (ablation_size,) + + if verbose: + logger.info(self.model) + + self.ablator = ColumnAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=model.default_cfg["input_size"], + device_type=device_type, + ) + + # set the method back to avoid unexpected side effects later on should timm need to be reused. + timm.models.vision_transformer._create_vision_transformer = tmp_func + + @classmethod + def get_models(cls, generate_from_null: bool = False) -> List[str]: + """ + Return the supported model names to the user. + + :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. 
+ :return: A list of compatible models + """ + import timm + import torch + + supported_models = [ + "vit_base_patch8_224", + "vit_base_patch16_18x2_224", + "vit_base_patch16_224", + "vit_base_patch16_224_miil", + "vit_base_patch16_384", + "vit_base_patch16_clip_224", + "vit_base_patch16_clip_384", + "vit_base_patch16_gap_224", + "vit_base_patch16_plus_240", + "vit_base_patch16_rpn_224", + "vit_base_patch16_xp_224", + "vit_base_patch32_224", + "vit_base_patch32_384", + "vit_base_patch32_clip_224", + "vit_base_patch32_clip_384", + "vit_base_patch32_clip_448", + "vit_base_patch32_plus_256", + "vit_giant_patch14_224", + "vit_giant_patch14_clip_224", + "vit_gigantic_patch14_224", + "vit_gigantic_patch14_clip_224", + "vit_huge_patch14_224", + "vit_huge_patch14_clip_224", + "vit_huge_patch14_clip_336", + "vit_huge_patch14_xp_224", + "vit_large_patch14_224", + "vit_large_patch14_clip_224", + "vit_large_patch14_clip_336", + "vit_large_patch14_xp_224", + "vit_large_patch16_224", + "vit_large_patch16_384", + "vit_large_patch32_224", + "vit_large_patch32_384", + "vit_medium_patch16_gap_240", + "vit_medium_patch16_gap_256", + "vit_medium_patch16_gap_384", + "vit_small_patch16_18x2_224", + "vit_small_patch16_36x1_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch32_384", + "vit_tiny_patch16_224", + "vit_tiny_patch16_384", + ] + + if not generate_from_null: + return supported_models + + supported = [] + unsupported = [] + + models = timm.list_models("vit_*") + for model in models: + logger.info(f"Testing {model} creation") + try: + _ = PyTorchSmoothedViT( + model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) + supported.append(model) + except (TypeError, AttributeError): + unsupported.append(model) + + if supported != supported_models: + logger.warning( + "Difference between the generated and fixed model list. Although not necessarily " + "an error, this may point to the timm library being updated." + ) + + return supported + + @staticmethod + def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "ArtViT": + """ + Creates a vision transformer using ArtViT which controls the forward pass of the model + + :param variant: The name of the vision transformer to load + :param pretrained: If to load pre-trained weights + :return: A ViT with the required methods needed for ART + """ + + from timm.models._builder import build_model_with_cfg + from timm.models.vision_transformer import checkpoint_filter_fn + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ArtViT + + return build_model_with_cfg( + ArtViT, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) + + def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: + """ + Method to update the batchnorm of a ViT on small datasets + + :param x: Training data. + :param batch_size: Size of batches. 
+ :param nb_epochs: How many times to forward pass over the input data + """ + import torch + self.model.train() + + ind = np.arange(len(x)) + num_batch = int(len(x) / float(batch_size)) + + with torch.no_grad(): + for _ in tqdm(range(nb_epochs)): + for m in tqdm(range(num_batch)): + i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(self.device) + i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + _ = self.model(i_batch) + + def fit_old( # pylint: disable=W0221 + self, + x: np.ndarray, + y: np.ndarray, + batch_size: int = 128, + nb_epochs: int = 10, + training_mode: bool = True, + drop_last: bool = False, + scheduler: Optional[Any] = None, + update_batchnorm: bool = True, + batchnorm_update_epochs: int = 1, + transform: Optional["torchvision.transforms.transforms.Compose"] = None, + verbose: bool = True, + **kwargs, + ) -> None: + """ + Fit the classifier on the training set `(x, y)`. + + :param x: Training data. + :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of + shape (nb_samples,). + :param batch_size: Size of batches. + :param nb_epochs: Number of epochs to use for training. + :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by + the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then + the last batch will be smaller. (default: ``False``) + :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior + to training. Useful on small datasets when using pre-trained ViTs. + :param batchnorm_update_epochs: how many times to forward pass over the training data + to pre-adjust the batchnorm statistics. + :param transform: Torchvision compose of relevant augmentation transformations to apply. + :param verbose: if to display training progress bars + :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch + and providing it takes no effect. 
+ """ + import torch + + # Set model mode + self._model.train(mode=training_mode) + + if self._optimizer is None: # pragma: no cover + raise ValueError("An optimizer is needed to train the model, but none for provided.") + + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + if update_batchnorm: + self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = len(x_preprocessed) / float(batch_size) + if drop_last: + num_batch = int(np.floor(num_batch)) + else: + num_batch = int(np.ceil(num_batch)) + ind = np.arange(len(x_preprocessed)) + + # Start training + for _ in tqdm(range(nb_epochs)): + # Shuffle the examples + random.shuffle(ind) + + epoch_acc = [] + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + + # Train for one epoch + for m in pbar: + i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])).to( + self._device + ) + if transform is not None: + i_batch = transform(i_batch) + i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + + o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) + + # Zero the parameter gradients + self._optimizer.zero_grad() + + # Perform prediction + try: + model_outputs = self.model(i_batch) + except ValueError as err: + if "Expected more than 1 value per channel when training" in str(err): + logger.exception( + "Try dropping the last incomplete batch by setting drop_last=True in " + "method PyTorchClassifier.fit." + ) + raise err + + loss = self.loss(model_outputs, o_batch) + acc = self.get_accuracy(preds=model_outputs, labels=o_batch) + + # Do training + if self._use_amp: # pragma: no cover + from apex import amp # pylint: disable=E0611 + + with amp.scale_loss(loss, self._optimizer) as scaled_loss: + scaled_loss.backward() + + else: + loss.backward() + + self.optimizer.step() + + epoch_acc.append(acc) + epoch_loss.append(loss.cpu().detach().numpy()) + epoch_batch_sizes.append(len(i_batch)) + + if verbose: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) + + if scheduler is not None: + scheduler.step() + + def eval_and_certify( + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + verbose: bool = True, + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Evaluates the ViT's normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. + :param batch_size: batch size when evaluating. 
+ :param verbose: If to display the progress bar + :return: The accuracy and certified accuracy over the dataset + """ + import torch + + self.model.eval() + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + pbar = tqdm(range(num_batch), disable=not verbose) + accuracy = torch.tensor(0.0).to(self._device) + cert_sum = torch.tensor(0.0).to(self._device) + n_samples = 0 + + with torch.no_grad(): + for m in pbar: + if m == (num_batch - 1): + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size :])).to(self._device) + o_batch = torch.from_numpy(y_preprocessed[m * batch_size :]).to(self._device) + else: + i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( + self._device + ) + o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) + + predictions = [] + pred_counts = torch.zeros((len(i_batch), self.nb_classes)).to(self._device) + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + + # Perform prediction + model_outputs = self.model(ablated_batch) + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1)] += 1 + predictions.append(model_outputs) + + _, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=size_to_certify, label=o_batch + ) + cert_sum += torch.sum(cert_and_correct) + accuracy += torch.sum(top_predicted_class == o_batch) + n_samples += len(cert_and_correct) + + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") + + return (accuracy / n_samples), (cert_sum / n_samples) + + @staticmethod + def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + """ + Helper function to get the accuracy during training. + + :param preds: model predictions. + :param labels: ground truth labels (not one hot). + :return: prediction accuracy. + """ + + if not isinstance(preds, np.ndarray): + preds = preds.detach().cpu().numpy() + + if not isinstance(preds, np.ndarray): + labels = labels.detach().cpu().numpy() + + return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py new file mode 100644 index 0000000000..5bf993c707 --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py @@ -0,0 +1,154 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf +""" + +from typing import Optional, Tuple +import random + +import torch + + +class UpSampler(torch.nn.Module): + """ + Resizes datasets to the specified size. + Usually for upscaling datasets like CIFAR to Imagenet format + """ + + def __init__(self, input_size: int, final_size: int) -> None: + """ + Creates an upsampler to make the supplied data match the pre-trained ViT format + + :param input_size: Size of the current input data + :param final_size: Desired final size + """ + super().__init__() + self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass though the upsampler. + + :param x: Input data + :return: The upsampled input data + """ + return self.upsample(x) + + +class ColumnAblator(torch.nn.Module): + """ + Pure Pytorch implementation of stripe/column ablation. + """ + + def __init__( + self, + ablation_size: int, + channels_first: bool, + to_reshape: bool = False, + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + device_type: str = "gpu", + ): + """ + Creates a column ablator + + :param ablation_size: The size of the column we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param to_reshape: If the input requires reshaping. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + """ + super().__init__() + self.ablation_size = ablation_size + self.channels_first = channels_first + self.to_reshape = to_reshape + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + if original_shape is not None and output_shape is not None: + self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + + def ablate(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Tensor: + """ + Ablates the input colum wise + + :param x: Input data + :param column_pos: The start position of the albation + :return: The ablated input with 0s where the ablation occurred + """ + k = self.ablation_size + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 + return x + + def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + """ + Forward pass though the ablator. 
We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :return: The albated input with an extra channel indicating the location of the ablation + """ + assert x.shape[1] == 3 + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + x = self.ablate(x, column_pos=column_pos) + if self.to_reshape: + x = self.upsample(x) + return x + + def certify( + self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions + + :param pred_counts: The model predictions over the ablated data. + :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. + """ + + num_of_classes = pred_counts.shape[-1] + + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, _ = pred_counts.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) + + cert_and_correct = cert & (label == top_predicted_class) + + return cert, cert_and_correct, top_predicted_class diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py new file mode 100644 index 0000000000..15069dd76c --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -0,0 +1,170 @@ +import torch +from timm.models.vision_transformer import VisionTransformer +from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING + + +class PatchEmbed(torch.nn.Module): + """ + Image to Patch Embedding + + Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit + + Original License: + + MIT License + + Copyright (c) 2021 Madry Lab + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + """ + + def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): + """ + Specifies the configuration for the convolutional layer. + + :param patch_size: The patch size used by the ViT. + :param in_channels: Number of input channels. 
+ :param embed_dim: The embedding dimension used by the ViT. + """ + super().__init__() + self.patch_size = patch_size + self.in_channels = in_channels + self.embed_dim = embed_dim + self.proj: Optional[torch.nn.Conv2d] = None + + def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: # pylint: disable=W0613 + """ + Creates a convolution that mimics the embedding layer to be used for the ablation mask to + track where the image was ablated. + + :param patch_size: The patch size used by the ViT + :param embed_dim: The embedding dimension used by the ViT + :param device: Which device to set the emdedding layer to. + :param kwargs: Handles the remaining kwargs from the ViT configuration. + """ + + if patch_size is not None: + self.patch_size = patch_size + if embed_dim is not None: + self.embed_dim = embed_dim + + self.proj = torch.nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + w_shape = self.proj.weight.shape + self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass through the embedder. We are simply tracking the positions of the ablation mask so no gradients + are required. + + :param x: Input data corresponding to the ablation mask + :return: The embedded input + """ + if self.proj is not None: + with torch.no_grad(): + x = self.proj(x).flatten(2).transpose(1, 2) + return x + raise ValueError("Projection layer not yet created.") + + +class ArtViT(VisionTransformer): + """ + Art class inheriting from VisionTransformer to control the forward pass of the ViT. + """ + + # Make as a class attribute to avoid being included in the + # state dictionaries of the ViT Model. + ablation_mask_embedder = PatchEmbed(in_channels=1) + + def __init__(self, **kwargs): + """ + Create a ArtViT instance + :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class + Must contain ... + """ + self.to_drop_tokens = kwargs["drop_tokens"] + + if kwargs["device_type"] == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + del kwargs["drop_tokens"] + del kwargs["device_type"] + + super().__init__(**kwargs) + self.ablation_mask_embedder.create(device=self.device, **kwargs) + + self.in_chans = kwargs["in_chans"] + self.img_size = kwargs["img_size"] + + @staticmethod + def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: + """ + Drops the tokens which correspond to fully masked inputs + + :param x: Input data + :param indexes: positions to be ablated + :return: Input with tokens dropped where the input was fully ablated. + """ + x_no_cl, cls_token = x[:, 1:], x[:, 0:1] + shape = x_no_cl.shape + + # reshape to temporarily remove batch + x_no_cl = torch.reshape(x_no_cl, shape=(-1, shape[-1])) + indexes = torch.reshape(indexes, shape=(-1,)) + indexes = indexes.nonzero(as_tuple=True)[0] + x_no_cl = torch.index_select(x_no_cl, dim=0, index=indexes) + x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) + return torch.cat((cls_token, x_no_cl), dim=1) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + """ + The forward pass of the ViT. + + :param x: Input data. 
+ :return: The input processed by the ViT backbone + """ + + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True + + if ablated_input: + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] + + x = self.patch_embed(x) + x = self._pos_embed(x) + + if self.to_drop_tokens and ablated_input: + ones = self.ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + x = self.drop_tokens(x, indexes) + + x = self.norm_pre(x) + x = self.blocks(x) + return self.norm(x) diff --git a/art/estimators/certification/smoothed_vision_transformers/__init__.py b/art/estimators/certification/smoothed_vision_transformers_old/__init__.py similarity index 100% rename from art/estimators/certification/smoothed_vision_transformers/__init__.py rename to art/estimators/certification/smoothed_vision_transformers_old/__init__.py diff --git a/art/estimators/certification/smoothed_vision_transformers/pytorch.py b/art/estimators/certification/smoothed_vision_transformers_old/pytorch.py similarity index 100% rename from art/estimators/certification/smoothed_vision_transformers/pytorch.py rename to art/estimators/certification/smoothed_vision_transformers_old/pytorch.py diff --git a/art/estimators/certification/smoothed_vision_transformers/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py similarity index 100% rename from art/estimators/certification/smoothed_vision_transformers/smooth_vit.py rename to art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py diff --git a/dev.py b/dev.py new file mode 100644 index 0000000000..afe1631813 --- /dev/null +++ b/dev.py @@ -0,0 +1,56 @@ +import torch +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing +import numpy as np +from torchvision import datasets +from torchvision import transforms + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +def get_cifar_data(): + """ + Get CIFAR-10 data. + :return: cifar train/test data. 
+ """ + train_set = datasets.CIFAR10('./data', train=True, download=True) + test_set = datasets.CIFAR10('./data', train=False, download=True) + + x_train = train_set.data.astype(np.float32) + y_train = np.asarray(train_set.targets) + + x_test = test_set.data.astype(np.float32) + y_test = np.asarray(test_set.targets) + + x_train = np.moveaxis(x_train, [3], [1]) + x_test = np.moveaxis(x_test, [3], [1]) + + x_train = x_train / 255.0 + x_test = x_test / 255.0 + + return (x_train, y_train), (x_test, y_test) + + +(x_train, y_train), (x_test, y_test) = get_cifar_data() + +art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + replace_last_layer=True, + load_pretrained=True,) + +scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) +art_model.fit(x_train, y_train, + nb_epochs=30, + update_batchnorm=True, + scheduler=scheduler, + transform=transforms.Compose([transforms.RandomHorizontalFlip()])) + +# torch.save(art_model.model.state_dict(), 'trained.pt') +# art_model.model.load_state_dict(torch.load('trained.pt')) +art_model.eval_and_certify(x_test, y_test, size_to_certify=4) From 2c5e4c50605239b89025a300f8818fc1624175af Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 27 Jun 2023 09:19:49 +0100 Subject: [PATCH 23/55] make colum pos optional in vit ablator, updating tests Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/pytorch.py | 175 ++++------------- .../vision_transformers/pytorch.py | 178 ++---------------- .../vision_transformers/smooth_vit.py | 17 +- .../vision_transformers/vit.py | 25 ++- .../certification/test_smooth_vit.py | 17 +- 5 files changed, 104 insertions(+), 308 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 422ca98d74..6946b1d416 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -39,7 +39,8 @@ if TYPE_CHECKING: # pylint: disable=C0412 import torch - + import torchvision + from timm.models.vision_transformer import VisionTransformer from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor @@ -149,149 +150,39 @@ def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epoc x = x.astype(ART_NUMPY_DTYPE) return PyTorchClassifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def fit_old( # pylint: disable=W0221 - self, - x: np.ndarray, - y: np.ndarray, - batch_size: int = 128, - nb_epochs: int = 10, - training_mode: bool = True, - drop_last: bool = False, - scheduler: Optional[Any] = None, - update_batchnorm: bool = True, - batchnorm_update_epochs: int = 1, - transform: Optional["torchvision.transforms.transforms.Compose"] = None, - verbose: bool = True, - **kwargs, - ) -> None: - """ - Fit the classifier on the training set `(x, y)`. - - :param x: Training data. - :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of - shape (nb_samples,). - :param batch_size: Size of batches. - :param nb_epochs: Number of epochs to use for training. 
- :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. - :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by - the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then - the last batch will be smaller. (default: ``False``) - :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param update_batchnorm: ViT Specific Arg. If to run the training data through the model to update any batch norm statistics prior - to training. Useful on small datasets when using pre-trained ViTs. - :param batchnorm_update_epochs: ViT Specific Arg. How many times to forward pass over the training data - to pre-adjust the batchnorm statistics. - :param transform: ViT Specific Arg. Torchvision compose of relevant augmentation transformations to apply. - :param verbose: if to display training progress bars - :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch - and providing it takes no effect. - """ - import torch - - # Check if we have a VIT - - # Set model mode - self._model.train(mode=training_mode) - - if self._optimizer is None: # pragma: no cover - raise ValueError("An optimizer is needed to train the model, but none for provided.") - - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - - # Apply preprocessing - x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) - - if update_batchnorm: # VIT specific - self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) - - # Check label shape - y_preprocessed = self.reduce_labels(y_preprocessed) - - num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) - ind = np.arange(len(x_preprocessed)) - - # Start training - for _ in tqdm(range(nb_epochs)): - # Shuffle the examples - random.shuffle(ind) - - epoch_acc = [] - epoch_loss = [] - epoch_batch_sizes = [] - - pbar = tqdm(range(num_batch), disable=not verbose) - - # Train for one epoch - for m in pbar: - i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) - i_batch = self.ablator.forward(i_batch) - - if transform is not None: # VIT specific - i_batch = transform(i_batch) - - i_batch = torch.from_numpy(i_batch).to(self._device) - o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) - - # Zero the parameter gradients - self._optimizer.zero_grad() - - # Perform prediction - try: - model_outputs = self.model(i_batch) - except ValueError as err: - if "Expected more than 1 value per channel when training" in str(err): - logger.exception( - "Try dropping the last incomplete batch by setting drop_last=True in " - "method PyTorchClassifier.fit." 
- ) - raise err - - loss = self.loss(model_outputs, o_batch) - acc = self.get_accuracy(preds=model_outputs, labels=o_batch) - - # Do training - if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 - - with amp.scale_loss(loss, self._optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - self.optimizer.step() - - epoch_acc.append(acc) - epoch_loss.append(loss.cpu().detach().numpy()) - epoch_batch_sizes.append(len(i_batch)) - - if verbose: - pbar.set_description( - f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " - f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " - ) +class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT): + """ + Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. - if scheduler is not None: - scheduler.step() + If a regular pytorch neural network is fed in then (De)Randomized Smoothing as introduced in Levine et al. (2020) + is used. + Otherwise, if a timm vision transfomer is fed in then Certified Patch Robustness via Smoothed Vision Transformers + as introduced in Salman et al. (2021) is used. + """ -class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT): def __init__(self, model: Union[str, "VisionTransformer", "torch.nn.Module"], **kwargs): import torch - if isinstance(model, torch.nn.Module): - PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) - self.mode = "CNN" + self.mode = None if importlib.util.find_spec("timm") is not None: from timm.models.vision_transformer import VisionTransformer - if isinstance(model, VisionTransformer) or isinstance(model, str): + if isinstance(model, (VisionTransformer, str)): PyTorchSmoothedViT.__init__(self, model, **kwargs) self.mode = "ViT" + else: + if isinstance(model, torch.nn.Module): + PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) + self.mode = "CNN" + + elif isinstance(model, torch.nn.Module): + PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) + self.mode = "CNN" + + if self.mode is None: + raise ValueError("Model type not recognized.") def fit( # pylint: disable=W0221 self, @@ -373,10 +264,11 @@ def fit( # pylint: disable=W0221 i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) i_batch = self.ablator.forward(i_batch) - if transform is not None and self.mode == "ViT": # VIT specific + if transform is not None and self.mode == "ViT": # VIT specific i_batch = transform(i_batch) - i_batch = torch.from_numpy(i_batch).to(self._device) + if isinstance(i_batch, np.ndarray): + i_batch = torch.from_numpy(i_batch).to(self._device) o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) # Zero the parameter gradients @@ -421,4 +313,19 @@ def fit( # pylint: disable=W0221 if scheduler is not None: scheduler.step() + @staticmethod + def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + """ + Helper function to get the accuracy during training. + + :param preds: model predictions. + :param labels: ground truth labels (not one hot). + :return: prediction accuracy. 
+ """ + if not isinstance(preds, np.ndarray): + preds = preds.detach().cpu().numpy() + + if not isinstance(labels, np.ndarray): + labels = labels.detach().cpu().numpy() + return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index 1675dfa2c2..798ad53405 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -26,7 +26,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING +from typing import List, Optional, Tuple, Union, TYPE_CHECKING import random import numpy as np @@ -37,7 +37,9 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - import torchvision + import torch + from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor @@ -108,7 +110,7 @@ def __init__( import timm import torch from timm.models.vision_transformer import VisionTransformer - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ArtViT + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT # temporarily assign the original method to tmp_func tmp_func = timm.models.vision_transformer._create_vision_transformer @@ -158,8 +160,8 @@ def __init__( converted_optimizer.load_state_dict(opt_state_dict) self.to_reshape = False - if not isinstance(model, ArtViT): - raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") + if not isinstance(model, PyTorchViT): + raise ValueError("Vision transformer is not of PyTorchViT. Error occurred in PyTorchViT creation.") if model.default_cfg["input_size"][0] != input_shape[0]: raise ValueError( @@ -170,10 +172,12 @@ def __init__( if model.default_cfg["input_size"] != input_shape: if verbose: logger.warning( - f"ViT expects input shape of {model.default_cfg['input_size']}, " - f"but {input_shape} specified as the input shape. " - f"The input will be rescaled to {model.default_cfg['input_size']}" + " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape. 
The input will be rescaled to (%i, %i, %i)", + *model.default_cfg["input_size"], + *input_shape, + *model.default_cfg["input_size"], ) + self.to_reshape = True if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): @@ -276,7 +280,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: models = timm.list_models("vit_*") for model in models: - logger.info(f"Testing {model} creation") + logger.info("Testing %s creation", model) try: _ = PyTorchSmoothedViT( model=model, @@ -303,9 +307,9 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: return supported @staticmethod - def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "ArtViT": + def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "PyTorchViT": """ - Creates a vision transformer using ArtViT which controls the forward pass of the model + Creates a vision transformer using PyTorchViT which controls the forward pass of the model :param variant: The name of the vision transformer to load :param pretrained: If to load pre-trained weights @@ -314,10 +318,10 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar from timm.models._builder import build_model_with_cfg from timm.models.vision_transformer import checkpoint_filter_fn - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ArtViT + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT return build_model_with_cfg( - ArtViT, + PyTorchViT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, @@ -326,13 +330,14 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: """ - Method to update the batchnorm of a ViT on small datasets + Method to update the batchnorm of a neural network on small datasets when it was pre-trained :param x: Training data. :param batch_size: Size of batches. :param nb_epochs: How many times to forward pass over the input data """ import torch + self.model.train() ind = np.arange(len(x)) @@ -345,133 +350,6 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) _ = self.model(i_batch) - def fit_old( # pylint: disable=W0221 - self, - x: np.ndarray, - y: np.ndarray, - batch_size: int = 128, - nb_epochs: int = 10, - training_mode: bool = True, - drop_last: bool = False, - scheduler: Optional[Any] = None, - update_batchnorm: bool = True, - batchnorm_update_epochs: int = 1, - transform: Optional["torchvision.transforms.transforms.Compose"] = None, - verbose: bool = True, - **kwargs, - ) -> None: - """ - Fit the classifier on the training set `(x, y)`. - - :param x: Training data. - :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of - shape (nb_samples,). - :param batch_size: Size of batches. - :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. - :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by - the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then - the last batch will be smaller. 
(default: ``False``) - :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior - to training. Useful on small datasets when using pre-trained ViTs. - :param batchnorm_update_epochs: how many times to forward pass over the training data - to pre-adjust the batchnorm statistics. - :param transform: Torchvision compose of relevant augmentation transformations to apply. - :param verbose: if to display training progress bars - :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch - and providing it takes no effect. - """ - import torch - - # Set model mode - self._model.train(mode=training_mode) - - if self._optimizer is None: # pragma: no cover - raise ValueError("An optimizer is needed to train the model, but none for provided.") - - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - - # Apply preprocessing - x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) - - if update_batchnorm: - self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) - - # Check label shape - y_preprocessed = self.reduce_labels(y_preprocessed) - - num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) - ind = np.arange(len(x_preprocessed)) - - # Start training - for _ in tqdm(range(nb_epochs)): - # Shuffle the examples - random.shuffle(ind) - - epoch_acc = [] - epoch_loss = [] - epoch_batch_sizes = [] - - pbar = tqdm(range(num_batch), disable=not verbose) - - # Train for one epoch - for m in pbar: - i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])).to( - self._device - ) - if transform is not None: - i_batch = transform(i_batch) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - - o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) - - # Zero the parameter gradients - self._optimizer.zero_grad() - - # Perform prediction - try: - model_outputs = self.model(i_batch) - except ValueError as err: - if "Expected more than 1 value per channel when training" in str(err): - logger.exception( - "Try dropping the last incomplete batch by setting drop_last=True in " - "method PyTorchClassifier.fit." 
- ) - raise err - - loss = self.loss(model_outputs, o_batch) - acc = self.get_accuracy(preds=model_outputs, labels=o_batch) - - # Do training - if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 - - with amp.scale_loss(loss, self._optimizer) as scaled_loss: - scaled_loss.backward() - - else: - loss.backward() - - self.optimizer.step() - - epoch_acc.append(acc) - epoch_loss.append(loss.cpu().detach().numpy()) - epoch_batch_sizes.append(len(i_batch)) - - if verbose: - pbar.set_description( - f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " - f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " - ) - - if scheduler is not None: - scheduler.step() - def eval_and_certify( self, x: np.ndarray, @@ -539,21 +417,3 @@ def eval_and_certify( pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") return (accuracy / n_samples), (cert_sum / n_samples) - - @staticmethod - def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: - """ - Helper function to get the accuracy during training. - - :param preds: model predictions. - :param labels: ground truth labels (not one hot). - :return: prediction accuracy. - """ - - if not isinstance(preds, np.ndarray): - preds = preds.detach().cpu().numpy() - - if not isinstance(preds, np.ndarray): - labels = labels.detach().cpu().numpy() - - return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py index 5bf993c707..fb8c1795f7 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py @@ -27,6 +27,7 @@ from typing import Optional, Tuple import random +import numpy as np import torch @@ -39,7 +40,7 @@ class UpSampler(torch.nn.Module): def __init__(self, input_size: int, final_size: int) -> None: """ Creates an upsampler to make the supplied data match the pre-trained ViT format - + :param input_size: Size of the current input data :param final_size: Desired final size """ @@ -93,7 +94,7 @@ def __init__( if original_shape is not None and output_shape is not None: self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Tensor: + def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: """ Ablates the input colum wise @@ -102,9 +103,6 @@ def ablate(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Ten :return: The ablated input with 0s where the ablation occurred """ k = self.ablation_size - if column_pos is None: - column_pos = random.randint(0, x.shape[3]) - if column_pos + k > x.shape[-1]: x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 else: @@ -112,7 +110,7 @@ def ablate(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Ten x[:, :, :, column_pos + k :] = 0.0 return x - def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + def forward(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. 
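         When column_pos is None a starting column is drawn at random, which is the behaviour used
         during training; certification instead calls the ablator once for every possible position.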
@@ -121,6 +119,13 @@ def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: :return: The albated input with an extra channel indicating the location of the ablation """ assert x.shape[1] == 3 + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) x = torch.cat([x, ones], dim=1) x = self.ablate(x, column_pos=column_pos) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index 15069dd76c..49168e38fa 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -1,6 +1,27 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +Implements functionality for running Vision Transformers in ART +""" +from typing import Optional + import torch from timm.models.vision_transformer import VisionTransformer -from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING class PatchEmbed(torch.nn.Module): @@ -89,7 +110,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: raise ValueError("Projection layer not yet created.") -class ArtViT(VisionTransformer): +class PyTorchViT(VisionTransformer): """ Art class inheriting from VisionTransformer to control the forward pass of the ViT. 
""" diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 1d8a13c8ae..f1c9566d73 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -20,8 +20,6 @@ import numpy as np from art.utils import load_dataset -from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT -from art.estimators.certification.smoothed_vision_transformers.pytorch import ArtViT from tests.utils import ARTTestException @@ -68,7 +66,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import torch device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator try: cifar_data = fix_get_cifar10_data[0] @@ -132,12 +130,13 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) Check that the training loop for pytorch does not result in errors """ import torch + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing try: cifar_data = fix_get_cifar10_data[0][:50] cifar_labels = fix_get_cifar10_data[1][:50] - art_model = PyTorchSmoothedViT( + art_model = PyTorchDeRandomizedSmoothing( model="vit_small_patch16_224", loss=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.SGD, @@ -147,6 +146,7 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) ablation_size=4, load_pretrained=True, replace_last_layer=True, + verbose=False, ) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) @@ -161,7 +161,7 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 """ Check that ... """ - from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator + from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator import torch try: @@ -187,6 +187,8 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") def test_equivalence(fix_get_cifar10_data): import torch + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT class MadrylabImplementations: """ @@ -309,9 +311,9 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: return self.norm(x) # Replace the forward_features with the forward_features code with checks. 
- ArtViT.forward_features = forward_features + PyTorchViT.forward_features = forward_features - art_model = PyTorchSmoothedViT( + art_model = PyTorchDeRandomizedSmoothing( model="vit_small_patch16_224", loss=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.SGD, @@ -321,6 +323,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: ablation_size=4, load_pretrained=False, replace_last_layer=True, + verbose=False, ) cifar_data = fix_get_cifar10_data[0][:50] From 784b7e9c484b33f96f9236760f2e29542ea2206f Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 27 Jun 2023 20:16:14 +0100 Subject: [PATCH 24/55] init refactor Signed-off-by: GiulioZizzo --- .../derandomized_smoothing.py | 9 +- .../derandomized_smoothing/pytorch.py | 372 +++++++++++++++--- .../vision_transformers/pytorch.py | 262 +----------- .../vision_transformers/smooth_vit.py | 11 +- dev.py | 105 +++-- 5 files changed, 418 insertions(+), 341 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py index 42a31ca418..387d300130 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py @@ -159,13 +159,14 @@ def __call__( raise NotImplementedError @abstractmethod - def certify(self, preds: np.ndarray, size_to_certify: int): + def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None): """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. + :param label: ground truth labels """ raise NotImplementedError @@ -230,13 +231,14 @@ def __call__( """ return self.forward(x=x, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int) -> np.ndarray: + def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None) -> np.ndarray: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. + :param label: Ground truth labels :return: Array of bools indicating if a point is certified against the given patch dimensions. """ indices = np.argsort(-preds, axis=1, kind="stable") @@ -348,13 +350,14 @@ def __call__( """ return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int) -> np.ndarray: + def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None) -> np.ndarray: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. + :param label: Ground truth labels :return: Array of bools indicating if a point is certified against the given patch dimensions. 
""" indices = np.argsort(-preds, axis=1, kind="stable") diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 6946b1d416..f6064b2c01 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -16,9 +16,20 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements (De)Randomized Smoothing for Certifiable Defense against Patch Attacks +This module implements the two De-randomized smoothing approaches supported by ART for pytorch. + +(De)Randomized Smoothing for Certifiable Defense against Patch Attacks | Paper link: https://arxiv.org/abs/2002.10733 + +and + +Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ from __future__ import absolute_import, division, print_function, unicode_literals @@ -48,7 +59,7 @@ logger = logging.getLogger(__name__) -class PyTorchDeRandomizedSmoothingCNN(DeRandomizedSmoothingMixin, PyTorchClassifier): +class PyTorchDeRandomizedSmoothingCNN(DeRandomizedSmoothingMixin): """ Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced in Levine et al. (2020). @@ -56,26 +67,7 @@ class PyTorchDeRandomizedSmoothingCNN(DeRandomizedSmoothingMixin, PyTorchClassif | Paper link: https://arxiv.org/abs/2002.10733 """ - estimator_params = PyTorchClassifier.estimator_params + ["ablation_type", "ablation_size", "threshold", "logits"] - - def __init__( - self, - model: "torch.nn.Module", - loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], - nb_classes: int, - ablation_type: str, - ablation_size: int, - threshold: float, - logits: bool, - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore - channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, - preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - device_type: str = "gpu", - ): + def __init__(self, **kwargs): """ Create a derandomized smoothing classifier. @@ -103,23 +95,7 @@ def __init__( be divided by the second one. :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. 
""" - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ablation_type=ablation_type, - ablation_size=ablation_size, - threshold=threshold, - logits=logits, - ) + super().__init__(**kwargs) def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: import torch @@ -146,12 +122,8 @@ def predict( """ return DeRandomizedSmoothingMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) - def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epochs: int, **kwargs) -> None: - x = x.astype(ART_NUMPY_DTYPE) - return PyTorchClassifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - -class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT): +class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT, PyTorchClassifier): """ Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. @@ -162,24 +134,217 @@ class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoot as introduced in Salman et al. (2021) is used. """ - def __init__(self, model: Union[str, "VisionTransformer", "torch.nn.Module"], **kwargs): + def __init__( + self, + model: Union[str, "VisionTransformer", "torch.nn.Module"], + loss: "torch.nn.modules.loss._Loss", + input_shape: Tuple[int, ...], + nb_classes: int, + ablation_size: int, + replace_last_layer: Optional[bool] = None, + drop_tokens: bool = True, + load_pretrained: bool = True, + optimizer: Union[type, "torch.optim.Optimizer", None] = None, + optimizer_params: Optional[dict] = None, + channels_first: bool = True, + ablation_type: Optional[str] = None, + threshold: Optional[float] = None, + logits: Optional[bool] = True, + clip_values: Optional["CLIP_VALUES_TYPE"] = None, + preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, + postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), + device_type: str = "gpu", + verbose: bool = True, + **kwargs, + ): + """ + Create a smoothed classifier. + + :param model: To run Salman et al. (2021): + Either a string specifying which ViT architecture to load, or a vision transformer already + created with the Pytorch Image Models (timm) library. + To run Levine et al. (2020) provide a regular pytorch model + :param loss: The loss function for which to compute gradients for training. The target label must be raw + categorical, i.e. not converted to one-hot encoding. + :param input_shape: The shape of one input instance. + :param nb_classes: The number of classes of the model. + :param ablation_size: The size of the data portion to retain after ablation. + :param replace_last_layer: ViT Specific. If to replace the last layer of the ViT with a fresh layer + matching the number of classes for the dataset to be examined. + Needed if going from the pre-trained imagenet models to fine-tune + on a dataset like CIFAR. + :param drop_tokens: ViT Specific. If to drop the fully ablated tokens in the ViT + :param load_pretrained: ViT Specific. If to load a pretrained model matching the ViT name. 
+ Will only affect the ViT if a string name is passed to model rather than a ViT directly. + :param optimizer: The optimizer used to train the classifier. + :param ablation_type: Specific to Levine et al. The type of ablation to perform, + must be either "column" or "block" + :param threshold: Specific to Levine et al. The minimum threshold to count a prediction. + :param logits: Specific to Levine et al. If the model returns logits or normalized probabilities + :param channels_first: Set channels first or last. + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. + :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. + :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be + used for data preprocessing. The first value will be subtracted from the input. The input will then + be divided by the second one. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. + """ + import torch self.mode = None if importlib.util.find_spec("timm") is not None: from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator if isinstance(model, (VisionTransformer, str)): - PyTorchSmoothedViT.__init__(self, model, **kwargs) + import timm + from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT + + if replace_last_layer is None: + raise ValueError("If using ViTs please specify if the last layer should be replaced") + + # temporarily assign the original method to tmp_func + tmp_func = timm.models.vision_transformer._create_vision_transformer + + # overrride with ART's ViT creation function + timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer + if isinstance(model, str): + model = timm.create_model( + model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type + ) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + if isinstance(optimizer, type): + if optimizer_params is not None: + optimizer = optimizer(model.parameters(), **optimizer_params) + else: + raise ValueError("If providing an optimiser please also supply its parameters") + + elif isinstance(model, VisionTransformer): + pretrained_cfg = model.pretrained_cfg + supplied_state_dict = model.state_dict() + supported_models = self.get_models() + if pretrained_cfg["architecture"] not in supported_models: + raise ValueError( + "Architecture not supported. Use PyTorchSmoothedViT.get_models() " + "to get the supported model architectures." 
+ ) + model = timm.create_model( + pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type + ) + model.load_state_dict(supplied_state_dict) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + + if optimizer is not None: + if not isinstance(optimizer, torch.optim.Optimizer): + raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") + + converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] + opt_state_dict = optimizer.state_dict() + if isinstance(optimizer, torch.optim.Adam): + logging.info("Converting Adam Optimiser") + converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) + elif isinstance(optimizer, torch.optim.SGD): + logging.info("Converting SGD Optimiser") + converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) + else: + raise ValueError("Optimiser not supported for conversion") + converted_optimizer.load_state_dict(opt_state_dict) + + self.to_reshape = False + if not isinstance(model, PyTorchViT): + raise ValueError("Vision transformer is not of PyTorchViT. Error occurred in PyTorchViT creation.") + + if model.default_cfg["input_size"][0] != input_shape[0]: + raise ValueError( + f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f" but {input_shape[0]} channels were provided." + ) + + if model.default_cfg["input_size"] != input_shape: + if verbose: + logger.warning( + " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape. The input will be rescaled to (%i, %i, %i)", + *model.default_cfg["input_size"], + *input_shape, + *model.default_cfg["input_size"], + ) + + self.to_reshape = True + + if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ablation_type="column", + ablation_size=ablation_size, + threshold=0.0, + logits=True, + ) + else: + raise ValueError("Error occurred in optimizer creation") + + self.ablation_size = (ablation_size,) + + if verbose: + logger.info(self.model) + + self.ablator = ColumnAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=model.default_cfg["input_size"], + device_type=device_type, + ) + + # set the method back to avoid unexpected side effects later on should timm need to be reused. 
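            # tmp_func still holds the factory captured before the override above, so restoring it
            # here lets later calls to timm.create_model outside ART build unmodified transformers.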
+ timm.models.vision_transformer._create_vision_transformer = tmp_func + self.mode = "ViT" else: if isinstance(model, torch.nn.Module): - PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) - self.mode = "CNN" + if ablation_type is None or threshold is None or logits is None: + raise ValueError( + "If using CNN please specify if the model returns logits, " + " the prediction threshold, and ablation type" + ) - elif isinstance(model, torch.nn.Module): - PyTorchDeRandomizedSmoothingCNN.__init__(self, model, **kwargs) - self.mode = "CNN" + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ablation_type=ablation_type, + ablation_size=ablation_size, + threshold=threshold, + logits=logits, + ) + self.mode = "CNN" if self.mode is None: raise ValueError("Model type not recognized.") @@ -242,10 +407,7 @@ def fit( # pylint: disable=W0221 y_preprocessed = self.reduce_labels(y_preprocessed) num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) + num_batch = int(np.floor(num_batch)) if drop_last else int(np.ceil(num_batch)) ind = np.arange(len(x_preprocessed)) # Start training @@ -329,3 +491,103 @@ def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndar labels = labels.detach().cpu().numpy() return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) + + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + if self.mode == "ViT": + return PyTorchClassifier.predict(self, x, batch_size, training_mode, **kwargs) + if self.mode == "CNN": + return PyTorchDeRandomizedSmoothingCNN.predict(self, x, batch_size, training_mode, **kwargs) + raise ValueError('mode is not ViT or CNN') + + def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: + """ + Method to update the batchnorm of a neural network on small datasets when it was pre-trained + + :param x: Training data. + :param batch_size: Size of batches. + :param nb_epochs: How many times to forward pass over the input data + """ + import torch + if self.mode != 'ViT': + raise ValueError('Accessing a ViT specific functionality while running in CNN mode') + + self.model.train() + + ind = np.arange(len(x)) + num_batch = int(len(x) / float(batch_size)) + + with torch.no_grad(): + for _ in tqdm(range(nb_epochs)): + for m in tqdm(range(num_batch)): + i_batch = np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]) + i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + _ = self.model(i_batch) + + def eval_and_certify( + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + verbose: bool = True, + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Evaluates the ViT's normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. + :param batch_size: batch size when evaluating. 
+ :param verbose: If to display the progress bar + :return: The accuracy and certified accuracy over the dataset + """ + import torch + if self.mode != 'ViT': + raise ValueError('Accessing a ViT specific functionality while running in CNN mode') + + self.model.eval() + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + pbar = tqdm(range(num_batch), disable=not verbose) + accuracy = torch.tensor(0.0).to(self._device) + cert_sum = torch.tensor(0.0).to(self._device) + n_samples = 0 + + with torch.no_grad(): + for m in pbar: + if m == (num_batch - 1): + i_batch = np.copy(x_preprocessed[m * batch_size :]) + o_batch = y_preprocessed[m * batch_size :] + else: + i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) + o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] + + predictions = [] + pred_counts = np.zeros((len(i_batch), self.nb_classes)) + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + + # Perform prediction + model_outputs = self.model(ablated_batch) + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + predictions.append(model_outputs) + + _, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=size_to_certify, label=o_batch + ) + cert_sum += torch.sum(cert_and_correct) + o_batch = torch.from_numpy(o_batch).to(self.device) + accuracy += torch.sum(top_predicted_class == o_batch) + n_samples += len(cert_and_correct) + + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") + + return (accuracy / n_samples), (cert_sum / n_samples) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index 798ad53405..b577d448cb 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -26,29 +26,18 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING -import random +from typing import List, TYPE_CHECKING -import numpy as np -from tqdm import tqdm - -from art.estimators.classification.pytorch import PyTorchClassifier -from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator -from art.utils import check_and_transform_label_format if TYPE_CHECKING: import torch - from timm.models.vision_transformer import VisionTransformer from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT - from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE - from art.defences.preprocessor import Preprocessor - from art.defences.postprocessor import Postprocessor logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -class PyTorchSmoothedViT(PyTorchClassifier): +class PyTorchSmoothedViT: """ Implementation of Certified Patch Robustness via Smoothed Vision Transformers @@ -58,161 +47,8 @@ class PyTorchSmoothedViT(PyTorchClassifier): | Paper link Arxiv version (more detail): 
https://arxiv.org/pdf/2110.07719.pdf """ - def __init__( - self, - model: Union["VisionTransformer", str], - loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], - nb_classes: int, - ablation_size: int, - replace_last_layer: bool, - drop_tokens: bool = True, - load_pretrained: bool = True, - optimizer: Union[type, "torch.optim.Optimizer", None] = None, - optimizer_params: Optional[dict] = None, - channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, - preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - device_type: str = "gpu", - verbose: bool = True, - ): - """ - Create a smoothed ViT classifier. - - :param model: Either a string specifying which ViT architecture to load, or a vision transformer already - created with the Pytorch Image Models (timm) library. - :param loss: The loss function for which to compute gradients for training. The target label must be raw - categorical, i.e. not converted to one-hot encoding. - :param input_shape: The shape of one input instance. - :param nb_classes: The number of classes of the model. - :param ablation_size: The size of the data portion to retain after ablation. - :param replace_last_layer: If to replace the last layer of the ViT with a fresh layer matching the number - of classes for the dataset to be examined. Needed if going from the pre-trained - imagenet models to fine-tune on a dataset like CIFAR. - :param drop_tokens: If to drop the fully ablated tokens in the ViT - :param load_pretrained: If to load a pretrained model matching the ViT name. Will only affect the ViT if a - string name is passed to model rather than a ViT directly. - :param optimizer: The optimizer used to train the classifier. - :param channels_first: Set channels first or last. - :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and - maximum values allowed for features. If floats are provided, these will be used as the range of all - features. If arrays are provided, each value will be considered the bound for a feature, thus - the shape of clip values needs to match the total number of features. - :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. - :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. - :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be - used for data preprocessing. The first value will be subtracted from the input. The input will then - be divided by the second one. - :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. 
- """ - import timm - import torch - from timm.models.vision_transformer import VisionTransformer - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT - - # temporarily assign the original method to tmp_func - tmp_func = timm.models.vision_transformer._create_vision_transformer - - # overrride with ART's ViT creation function - timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer - if isinstance(model, str): - model = timm.create_model( - model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type - ) - if replace_last_layer: - model.head = torch.nn.Linear(model.head.in_features, nb_classes) - if isinstance(optimizer, type): - if optimizer_params is not None: - optimizer = optimizer(model.parameters(), **optimizer_params) - else: - raise ValueError("If providing an optimiser please also supply its parameters") - - elif isinstance(model, VisionTransformer): - pretrained_cfg = model.pretrained_cfg - supplied_state_dict = model.state_dict() - supported_models = self.get_models() - if pretrained_cfg["architecture"] not in supported_models: - raise ValueError( - "Architecture not supported. Use PyTorchSmoothedViT.get_models() " - "to get the supported model architectures." - ) - model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type) - model.load_state_dict(supplied_state_dict) - if replace_last_layer: - model.head = torch.nn.Linear(model.head.in_features, nb_classes) - - if optimizer is not None: - if not isinstance(optimizer, torch.optim.Optimizer): - raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") - - converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] - opt_state_dict = optimizer.state_dict() - if isinstance(optimizer, torch.optim.Adam): - logging.info("Converting Adam Optimiser") - converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) - elif isinstance(optimizer, torch.optim.SGD): - logging.info("Converting SGD Optimiser") - converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) - else: - raise ValueError("Optimiser not supported for conversion") - converted_optimizer.load_state_dict(opt_state_dict) - - self.to_reshape = False - if not isinstance(model, PyTorchViT): - raise ValueError("Vision transformer is not of PyTorchViT. Error occurred in PyTorchViT creation.") - - if model.default_cfg["input_size"][0] != input_shape[0]: - raise ValueError( - f'ViT requires {model.default_cfg["input_size"][0]} channel input,' - f" but {input_shape[0]} channels were provided." - ) - - if model.default_cfg["input_size"] != input_shape: - if verbose: - logger.warning( - " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape. 
The input will be rescaled to (%i, %i, %i)", - *model.default_cfg["input_size"], - *input_shape, - *model.default_cfg["input_size"], - ) - - self.to_reshape = True - - if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ) - else: - raise ValueError("Error occurred in optimizer creation") - - self.ablation_size = (ablation_size,) - - if verbose: - logger.info(self.model) - - self.ablator = ColumnAblator( - ablation_size=ablation_size, - channels_first=True, - to_reshape=self.to_reshape, - original_shape=input_shape, - output_shape=model.default_cfg["input_size"], - device_type=device_type, - ) - - # set the method back to avoid unexpected side effects later on should timm need to be reused. - timm.models.vision_transformer._create_vision_transformer = tmp_func + def __init__(self, **kwargs): + super().__init__(**kwargs) @classmethod def get_models(cls, generate_from_null: bool = False) -> List[str]: @@ -327,93 +163,3 @@ def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwar pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) - - def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: - """ - Method to update the batchnorm of a neural network on small datasets when it was pre-trained - - :param x: Training data. - :param batch_size: Size of batches. - :param nb_epochs: How many times to forward pass over the input data - """ - import torch - - self.model.train() - - ind = np.arange(len(x)) - num_batch = int(len(x) / float(batch_size)) - - with torch.no_grad(): - for _ in tqdm(range(nb_epochs)): - for m in tqdm(range(num_batch)): - i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(self.device) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - _ = self.model(i_batch) - - def eval_and_certify( - self, - x: np.ndarray, - y: np.ndarray, - size_to_certify: int, - batch_size: int = 128, - verbose: bool = True, - ) -> Tuple["torch.Tensor", "torch.Tensor"]: - """ - Evaluates the ViT's normal and certified performance over the supplied data. - - :param x: Evaluation data. - :param y: Evaluation labels. - :param size_to_certify: The size of the patch to certify against. - If not provided will default to the ablation size. - :param batch_size: batch size when evaluating. 
- :param verbose: If to display the progress bar - :return: The accuracy and certified accuracy over the dataset - """ - import torch - - self.model.eval() - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - - # Apply preprocessing - x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) - - # Check label shape - y_preprocessed = self.reduce_labels(y_preprocessed) - - num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - pbar = tqdm(range(num_batch), disable=not verbose) - accuracy = torch.tensor(0.0).to(self._device) - cert_sum = torch.tensor(0.0).to(self._device) - n_samples = 0 - - with torch.no_grad(): - for m in pbar: - if m == (num_batch - 1): - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size :])).to(self._device) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size :]).to(self._device) - else: - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( - self._device - ) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) - - predictions = [] - pred_counts = torch.zeros((len(i_batch), self.nb_classes)).to(self._device) - for pos in range(i_batch.shape[-1]): - ablated_batch = self.ablator.forward(i_batch, column_pos=pos) - - # Perform prediction - model_outputs = self.model(ablated_batch) - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1)] += 1 - predictions.append(model_outputs) - - _, cert_and_correct, top_predicted_class = self.ablator.certify( - pred_counts, size_to_certify=size_to_certify, label=o_batch - ) - cert_sum += torch.sum(cert_and_correct) - accuracy += torch.sum(top_predicted_class == o_batch) - n_samples += len(cert_and_correct) - - pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") - - return (accuracy / n_samples), (cert_sum / n_samples) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py index fb8c1795f7..2521de0363 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py @@ -24,7 +24,7 @@ | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ -from typing import Optional, Tuple +from typing import Optional, Union, Tuple import random import numpy as np @@ -110,7 +110,7 @@ def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: x[:, :, :, column_pos + k :] = 0.0 return x - def forward(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Tensor: + def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. @@ -134,7 +134,7 @@ def forward(self, x: torch.Tensor, column_pos: Optional[int] = None) -> torch.Te return x def certify( - self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor + self, pred_counts: Union[torch.Tensor, np.ndarray], size_to_certify: int, label: Union[torch.Tensor, np.ndarray] ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions @@ -146,6 +146,11 @@ def certify( the predictions which were certified and also correct, and the most predicted class across the different ablations on the input. 
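         A sketch of the decision rule used for column ablation (per Salman et al.): a prediction is
         treated as certified when the vote margin between the most and second most predicted classes
         exceeds 2 * (size_to_certify + ablation_size - 1), the number of ablation positions a patch
         of that width can intersect. For example, with ablation_size=4 and size_to_certify=4 a margin
         of more than 14 votes is required.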
""" + if isinstance(pred_counts, np.ndarray): + pred_counts = torch.from_numpy(pred_counts).to(self.device) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label).to(self.device) num_of_classes = pred_counts.shape[-1] diff --git a/dev.py b/dev.py index afe1631813..763700749a 100644 --- a/dev.py +++ b/dev.py @@ -32,25 +32,86 @@ def get_cifar_data(): return (x_train, y_train), (x_test, y_test) -(x_train, y_train), (x_test, y_test) = get_cifar_data() - -art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - replace_last_layer=True, - load_pretrained=True,) - -scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) -art_model.fit(x_train, y_train, - nb_epochs=30, - update_batchnorm=True, - scheduler=scheduler, - transform=transforms.Compose([transforms.RandomHorizontalFlip()])) - -# torch.save(art_model.model.state_dict(), 'trained.pt') -# art_model.model.load_state_dict(torch.load('trained.pt')) -art_model.eval_and_certify(x_test, y_test, size_to_certify=4) +def vit_dev(): + (x_train, y_train), (x_test, y_test) = get_cifar_data() + + art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + replace_last_layer=True, + load_pretrained=True) + # art_model.predict(x_train[0:10]) + + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) + art_model.fit(x_train, y_train, + nb_epochs=30, + update_batchnorm=False, + scheduler=scheduler, + transform=transforms.Compose([transforms.RandomHorizontalFlip()])) + + torch.save(art_model.model.state_dict(), 'trained_refactor.pt') + art_model.model.load_state_dict(torch.load('trained_refactor.pt')) + art_model.eval_and_certify(x_test, y_test, size_to_certify=4) + + +def cnn_dev(): + class CIFARModel(torch.nn.Module): + + def __init__(self, number_of_classes: int): + super(CIFARModel, self).__init__() + + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + self.conv_1 = torch.nn.Conv2d(in_channels=6, + out_channels=32, + kernel_size=4, + stride=2) + + self.conv_2 = torch.nn.Conv2d(in_channels=32, + out_channels=32, + kernel_size=4, + stride=1) + + self.fc1 = torch.nn.Linear(in_features=4608, out_features=number_of_classes) + + self.relu = torch.nn.ReLU() + + def forward(self, x: "torch.Tensor") -> "torch.Tensor": + """ + Computes the forward pass though the neural network + :param x: input data of shape (batch size, N features) + :return: model prediction + """ + x = self.relu(self.conv_1(x)) + x = self.relu(self.conv_2(x)) + x = torch.flatten(x, 1) + return self.fc1(x) + + model = CIFARModel(number_of_classes=10) + (x_train, y_train), (x_test, y_test) = get_cifar_data() + + art_model = PyTorchDeRandomizedSmoothing(model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD(model.parameters(), lr=0.001), + input_shape=(3, 32, 32), + nb_classes=10, + ablation_type='column', + ablation_size=4, + threshold=0.1, + logits=False) + + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) + art_model.predict(x_train[0:10]) + print(art_model) + + art_model.fit(x_train, y_train, + nb_epochs=30, + update_batchnorm=True, 
+ scheduler=scheduler) + +vit_dev() +# cnn_dev() \ No newline at end of file From 6b3dcf680332ffdffa2969df497e49bc26985ba0 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 28 Jun 2023 13:10:16 +0100 Subject: [PATCH 25/55] update test script Signed-off-by: GiulioZizzo --- dev.py | 75 ++++++--- .../certification/test_smooth_vit.py | 156 +++++++++++++++++- 2 files changed, 204 insertions(+), 27 deletions(-) diff --git a/dev.py b/dev.py index 763700749a..984efe8a56 100644 --- a/dev.py +++ b/dev.py @@ -1,6 +1,6 @@ import torch import ssl -ssl._create_default_https_context = ssl._create_unverified_context +# ssl._create_default_https_context = ssl._create_unverified_context from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing import numpy as np from torchvision import datasets @@ -31,6 +31,26 @@ def get_cifar_data(): return (x_train, y_train), (x_test, y_test) +def get_mnist_data(): + """ + Get the MNIST data. + """ + train_set = datasets.MNIST('./data', train=True, download=True) + test_set = datasets.MNIST('./data', train=False, download=True) + + x_train = train_set.data.numpy().astype(np.float32) + y_train = train_set.targets.numpy() + + x_test = test_set.data.numpy().astype(np.float32) + y_test = test_set.targets.numpy() + + x_train = np.expand_dims(x_train, axis=1) + x_test = np.expand_dims(x_test, axis=1) + + x_train = x_train / 255.0 + x_test = x_test / 255.0 + + return (x_train, y_train), (x_test, y_test) def vit_dev(): (x_train, y_train), (x_test, y_test) = get_cifar_data() @@ -59,24 +79,27 @@ def vit_dev(): def cnn_dev(): - class CIFARModel(torch.nn.Module): + class MNISTModel(torch.nn.Module): def __init__(self, number_of_classes: int): - super(CIFARModel, self).__init__() + super(MNISTModel, self).__init__() self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.conv_1 = torch.nn.Conv2d(in_channels=6, - out_channels=32, + self.conv_1 = torch.nn.Conv2d(in_channels=2, + out_channels=64, kernel_size=4, - stride=2) + stride=2, + padding=1) - self.conv_2 = torch.nn.Conv2d(in_channels=32, - out_channels=32, + self.conv_2 = torch.nn.Conv2d(in_channels=64, + out_channels=128, kernel_size=4, - stride=1) + stride=2, padding=1) - self.fc1 = torch.nn.Linear(in_features=4608, out_features=number_of_classes) + self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) + self.fc2 = torch.nn.Linear(in_features=500, out_features=100) + self.fc3 = torch.nn.Linear(in_features=100, out_features=10) self.relu = torch.nn.ReLU() @@ -89,29 +112,31 @@ def forward(self, x: "torch.Tensor") -> "torch.Tensor": x = self.relu(self.conv_1(x)) x = self.relu(self.conv_2(x)) x = torch.flatten(x, 1) - return self.fc1(x) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x - model = CIFARModel(number_of_classes=10) - (x_train, y_train), (x_test, y_test) = get_cifar_data() + model = MNISTModel(number_of_classes=10) + # (x_train, y_train), (x_test, y_test) = get_cifar_data() + (x_train, y_train), (x_test, y_test) = get_mnist_data() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005) art_model = PyTorchDeRandomizedSmoothing(model=model, loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD(model.parameters(), lr=0.001), - input_shape=(3, 32, 32), + optimizer=optimizer, + input_shape=(1, 28, 28), nb_classes=10, ablation_type='column', - ablation_size=4, - threshold=0.1, - logits=False) + ablation_size=2, + threshold=0.3, + logits=True) - scheduler = 
torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) - art_model.predict(x_train[0:10]) - print(art_model) + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[200], gamma=0.1) art_model.fit(x_train, y_train, - nb_epochs=30, - update_batchnorm=True, + nb_epochs=400, scheduler=scheduler) -vit_dev() -# cnn_dev() \ No newline at end of file +# vit_dev() +cnn_dev() \ No newline at end of file diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index f1c9566d73..7649b0536f 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -66,7 +66,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import torch device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator try: cifar_data = fix_get_cifar10_data[0] @@ -75,6 +75,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): ablation_size=4, channels_first=True, to_reshape=False, # do not upsample initially + mode='ViT', original_shape=(3, 32, 32), output_shape=(3, 224, 224), ) @@ -101,6 +102,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): ablation_size=4, channels_first=True, to_reshape=True, + mode='ViT', original_shape=(3, 32, 32), output_shape=(3, 224, 224), ) @@ -161,13 +163,14 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 """ Check that ... """ - from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator import torch try: col_ablator = ColumnAblator( ablation_size=4, channels_first=True, + mode='ViT', to_reshape=True, # do not upsample initially original_shape=(3, 32, 32), output_shape=(3, 224, 224), @@ -183,6 +186,146 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 except ARTTestException as e: art_warning(e) +def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Assert implementations matches original with a forward pass through the same model architecture. + Note, there are some differences in architecture between the same model names. + We use vit_base_patch16_224 which matches. + """ + import torch + import os + import sys + + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + + os.system("git clone https://github.com/MadryLab/smoothed-vit") + sys.path.append('smoothed-vit/src/utils/') + + # Original MaskProcessor used ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + # which is not compatible with non-cuda torch as is found when running tests on github. 
+ # Hence, replace the class with the same code, but having changed to + # ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + # Original licence: + """ + MIT License + + Copyright (c) 2021 Madry Lab + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + """ + + class MaskProcessor(torch.nn.Module): + def __init__(self, patch_size=16): + super().__init__() + self.avg_pool = torch.nn.AvgPool2d(patch_size) + + def forward(self, ones_mask): + B = ones_mask.shape[0] + ones_mask = ones_mask[0].unsqueeze(0) # take the first mask + ones_mask = self.avg_pool(ones_mask)[0] + ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 + ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + ones_mask = ones_mask.expand(B, -1) + return ones_mask + + from custom_models import preprocess + preprocess.MaskProcessor = MaskProcessor + + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + from custom_models.vision_transformer import vit_small_patch16_224, vit_base_patch16_224 + + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] + + ''' + timm config for: + def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. 
+ """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + + def vit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/16) + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + smooth repo config for: + def vit_small_patch16_224(pretrained=False, **kwargs): + if pretrained: + # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model + kwargs.setdefault('qk_scale', 768 ** -0.5) + model = VisionTransformer(patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3., **kwargs) + model.default_cfg = default_cfgs['vit_small_patch16_224'] + if pretrained: + load_pretrained( + model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter) + return model + + + def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + ''' + + art_model = PyTorchDeRandomizedSmoothing( + model="vit_base_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) + art_sd = art_model.model.state_dict() + madry_vit = vit_base_patch16_224(pretrained=False) + madry_vit.head = torch.nn.Linear(madry_vit.head.in_features, 10) + + madry_vit.load_state_dict(art_sd) + + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode='ViT', + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + ablated = col_ablator.forward(cifar_data, column_pos=10) + + madry_preds = madry_vit(ablated) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") def test_equivalence(fix_get_cifar10_data): @@ -264,6 +407,15 @@ def embedder(cls, x, pos_embed, cls_token): """ NB, original code used the pos embed from the divit rather than vit (which we pull from our model) which we use here. 
+ + From timm vit: + self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + + From timm dvit: + self.pos_embed = nn.Parameter(torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) + + From repo: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) """ x = torch.cat((cls_token.expand(x.shape[0], -1, -1), x), dim=1) return x + pos_embed From 3f3cea632673bd59f699ef298e83d5c1c9953beb Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 12 Jul 2023 14:19:33 +0100 Subject: [PATCH 26/55] splitting out pytorch functionalities Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 190 ++++++++++++++ .../derandomized_smoothing/pytorch.py | 245 ++++++++---------- .../vision_transformers/smooth_vit.py | 11 +- 3 files changed, 309 insertions(+), 137 deletions(-) create mode 100644 art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py new file mode 100644 index 0000000000..cf2bd9748c --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -0,0 +1,190 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf +""" + +from typing import Optional, Union, Tuple +import random + +import numpy as np +import torch + + +class UpSampler(torch.nn.Module): + """ + Resizes datasets to the specified size. + Usually for upscaling datasets like CIFAR to Imagenet format + """ + + def __init__(self, input_size: int, final_size: int) -> None: + """ + Creates an upsampler to make the supplied data match the pre-trained ViT format + + :param input_size: Size of the current input data + :param final_size: Desired final size + """ + super().__init__() + self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass though the upsampler. 
+ + :param x: Input data + :return: The upsampled input data + """ + return self.upsample(x) + + +class ColumnAblator(torch.nn.Module): + """ + Pure Pytorch implementation of stripe/column ablation. + """ + + def __init__( + self, + ablation_size: int, + channels_first: bool, + mode, + to_reshape: bool, + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + algorithm: str = 'salman2021', + device_type: str = "gpu", + ): + """ + Creates a column ablator + + :param ablation_size: The size of the column we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param to_reshape: If the input requires reshaping. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + """ + super().__init__() + + self.ablation_size = ablation_size + self.channels_first = channels_first + self.to_reshape = to_reshape + self.add_ablation_mask = False + self.additional_channels = False + self.algorithm = algorithm + self.original_shape = original_shape + + if self.algorithm == 'levine2020': + self.additional_channels = True + if self.algorithm == 'salman2021' and mode == 'ViT': + self.add_ablation_mask = True + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + if original_shape is not None and output_shape is not None: + self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + + def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + """ + Ablates the input colum wise + + :param x: Input data + :param column_pos: The start position of the albation + :return: The ablated input with 0s where the ablation occurred + """ + k = self.ablation_size + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 + return x + + def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None) -> torch.Tensor: + """ + Forward pass though the ablator. We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :return: The albated input with an extra channel indicating the location of the ablation + """ + + if x.shape[1] != self.original_shape[0]: + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + if self.add_ablation_mask: + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + + if self.additional_channels: + x = torch.cat([x, 1.0 - x], dim=1) + + x = self.ablate(x, column_pos=column_pos) + + if self.to_reshape: + x = self.upsample(x) + return x + + def certify(self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions + + :param pred_counts: The model predictions over the ablated data. 
+ :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. + """ + + if isinstance(pred_counts, np.ndarray): + pred_counts = torch.from_numpy(pred_counts).to(self.device) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label).to(self.device) + + num_of_classes = pred_counts.shape[-1] + + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) + + cert_and_correct = cert & (label == top_predicted_class) + + if self.algorithm == 'levine2020': + tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ + & (top_predicted_class < second_predicted_class) + cert = torch.logical_or(cert, tie_break_certs) + return cert, cert_and_correct, top_predicted_class diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index f6064b2c01..cf6e787bea 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -44,7 +44,6 @@ from art.config import ART_NUMPY_DTYPE from art.estimators.classification.pytorch import PyTorchClassifier from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchSmoothedViT -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.utils import check_and_transform_label_format if TYPE_CHECKING: @@ -59,71 +58,7 @@ logger = logging.getLogger(__name__) -class PyTorchDeRandomizedSmoothingCNN(DeRandomizedSmoothingMixin): - """ - Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced - in Levine et al. (2020). - - | Paper link: https://arxiv.org/abs/2002.10733 - """ - - def __init__(self, **kwargs): - """ - Create a derandomized smoothing classifier. - - :param model: PyTorch model. The output of the model can be logits, probabilities or anything else. Logits - output should be preferred where possible to ensure attack efficiency. - :param loss: The loss function for which to compute gradients for training. The target label must be raw - categorical, i.e. not converted to one-hot encoding. - :param input_shape: The shape of one input instance. - :param nb_classes: The number of classes of the model. - :param ablation_type: The type of ablation to perform, must be either "column" or "block" - :param ablation_size: The size of the data portion to retain after ablation. Will be a column of size N for - "column" ablation type or a NxN square for ablation of type "block" - :param threshold: The minimum threshold to count a prediction. - :param logits: if the model returns logits or normalized probabilities - :param optimizer: The optimizer used to train the classifier. - :param channels_first: Set channels first or last. - :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and - maximum values allowed for features. If floats are provided, these will be used as the range of all - features. 
If arrays are provided, each value will be considered the bound for a feature, thus - the shape of clip values needs to match the total number of features. - :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. - :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. - :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be - used for data preprocessing. The first value will be subtracted from the input. The input will then - be divided by the second one. - :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. - """ - super().__init__(**kwargs) - - def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: - import torch - - x = x.astype(ART_NUMPY_DTYPE) - outputs = PyTorchClassifier.predict(self, x=x, batch_size=batch_size, training_mode=training_mode, **kwargs) - - if not self.logits: - return np.asarray((outputs >= self.threshold)) - return np.asarray( - (torch.nn.functional.softmax(torch.from_numpy(outputs), dim=1) >= self.threshold).type(torch.int) - ) - - def predict( - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: # type: ignore - """ - Perform prediction of the given classifier for a batch of inputs, taking an expectation over transformations. - - :param x: Input samples. - :param batch_size: Batch size. - :param training_mode: if to run the classifier in training mode - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. - """ - return DeRandomizedSmoothingMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) - - -class PyTorchDeRandomizedSmoothing(PyTorchDeRandomizedSmoothingCNN, PyTorchSmoothedViT, PyTorchClassifier): +class PyTorchDeRandomizedSmoothing(PyTorchSmoothedViT, PyTorchClassifier): """ Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. @@ -141,6 +76,7 @@ def __init__( input_shape: Tuple[int, ...], nb_classes: int, ablation_size: int, + algorithm: str = 'salman2021', replace_last_layer: Optional[bool] = None, drop_tokens: bool = True, load_pretrained: bool = True, @@ -161,15 +97,16 @@ def __init__( """ Create a smoothed classifier. - :param model: To run Salman et al. (2021): - Either a string specifying which ViT architecture to load, or a vision transformer already - created with the Pytorch Image Models (timm) library. - To run Levine et al. (2020) provide a regular pytorch model + :param model: Either a CNN or a VIT. For a ViT supply a string specifying which ViT architecture to load from + the ViT library, or a vision transformer already created with the Pytorch Image Models (timm) library. + To run Levine et al. (2020) provide a regular pytorch model. :param loss: The loss function for which to compute gradients for training. The target label must be raw - categorical, i.e. not converted to one-hot encoding. + categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. :param nb_classes: The number of classes of the model. :param ablation_size: The size of the data portion to retain after ablation. + :param algorithm: Either 'salman2021' or 'levine2020'. For salman2021 we support ViTs and CNNs. For levine2020 + there is only CNN support. :param replace_last_layer: ViT Specific. 
If to replace the last layer of the ViT with a fresh layer matching the number of classes for the dataset to be examined. Needed if going from the pre-trained imagenet models to fine-tune @@ -196,15 +133,14 @@ def __init__( """ import torch + print(algorithm) self.mode = None - if importlib.util.find_spec("timm") is not None: + if importlib.util.find_spec("timm") is not None and algorithm == 'salman2021': from timm.models.vision_transformer import VisionTransformer - from art.estimators.certification.derandomized_smoothing.vision_transformers.smooth_vit import ColumnAblator if isinstance(model, (VisionTransformer, str)): import timm - from timm.models.vision_transformer import VisionTransformer from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT if replace_last_layer is None: @@ -279,72 +215,57 @@ def __init__( ) self.to_reshape = True - - if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ablation_type="column", - ablation_size=ablation_size, - threshold=0.0, - logits=True, - ) - else: - raise ValueError("Error occurred in optimizer creation") - - self.ablation_size = (ablation_size,) - - if verbose: - logger.info(self.model) - - self.ablator = ColumnAblator( - ablation_size=ablation_size, - channels_first=True, - to_reshape=self.to_reshape, - original_shape=input_shape, - output_shape=model.default_cfg["input_size"], - device_type=device_type, - ) + output_shape = model.default_cfg["input_size"] # set the method back to avoid unexpected side effects later on should timm need to be reused. 
timm.models.vision_transformer._create_vision_transformer = tmp_func - self.mode = "ViT" else: if isinstance(model, torch.nn.Module): - if ablation_type is None or threshold is None or logits is None: - raise ValueError( - "If using CNN please specify if the model returns logits, " - " the prediction threshold, and ablation type" - ) - - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ablation_type=ablation_type, - ablation_size=ablation_size, - threshold=threshold, - logits=logits, - ) + if algorithm == 'levine2020': + if ablation_type is None or threshold is None or logits is None: + raise ValueError( + "If using CNN please specify if the model returns logits, " + " the prediction threshold, and ablation type" + ) self.mode = "CNN" + output_shape = input_shape + + if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ) + else: + raise ValueError("Error occurred in optimizer creation") + + self.threshold = threshold + self.logits = logits + self.ablation_size = (ablation_size,) + self.algorithm = algorithm + if verbose: + logger.info(self.model) + + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + self.ablator = ColumnAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=output_shape, + device_type=device_type, + algorithm=algorithm, + mode=self.mode, + ) if self.mode is None: raise ValueError("Model type not recognized.") @@ -492,12 +413,14 @@ def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndar return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) + ''' def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: if self.mode == "ViT": return PyTorchClassifier.predict(self, x, batch_size, training_mode, **kwargs) if self.mode == "CNN": return PyTorchDeRandomizedSmoothingCNN.predict(self, x, batch_size, training_mode, **kwargs) raise ValueError('mode is not ViT or CNN') + ''' def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: """ @@ -543,7 +466,7 @@ def eval_and_certify( :return: The accuracy and certified accuracy over the dataset """ import torch - if self.mode != 'ViT': + if self.mode != 'ViT': # TODO, adapt for cnn first raise ValueError('Accessing a ViT specific functionality while running in CNN mode') self.model.eval() @@ -591,3 +514,57 @@ def eval_and_certify( pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") return (accuracy / n_samples), (cert_sum / n_samples) + + def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: + import torch + + x = x.astype(ART_NUMPY_DTYPE) + outputs = PyTorchClassifier.predict(self, x=x, batch_size=batch_size, training_mode=training_mode, 
**kwargs) + + if self.algorithm == 'levine2020': + if not self.logits: + return np.asarray((outputs >= self.threshold)) + return np.asarray( + (torch.nn.functional.softmax(torch.from_numpy(outputs), dim=1) >= self.threshold).type(torch.int) + ) + return outputs + + def predict(self, x, batch_size, training_mode, **kwargs): + if self._channels_first: + columns_in_data = x.shape[-1] + rows_in_data = x.shape[-2] + else: + columns_in_data = x.shape[-2] + rows_in_data = x.shape[-3] + + if self.ablation_type in {"column", "row"}: + if self.ablation_type == "column": + ablate_over_range = columns_in_data + else: + # image will be transposed, so loop over the number of rows + ablate_over_range = rows_in_data + + for ablation_start in range(ablate_over_range): + ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) + if ablation_start == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + elif self.ablation_type == "block": + for xcorner in range(rows_in_data): + for ycorner in range(columns_in_data): + ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) + if ycorner == 0 and xcorner == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + + return preds diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py index 2521de0363..b29115af23 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py @@ -69,6 +69,7 @@ def __init__( to_reshape: bool = False, original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, + add_ablation_mask: bool = True, device_type: str = "gpu", ): """ @@ -84,6 +85,8 @@ def __init__( self.ablation_size = ablation_size self.channels_first = channels_first self.to_reshape = to_reshape + self.expected_input_channels = 1 + self.add_ablation_mask = add_ablation_mask if device_type == "cpu" or not torch.cuda.is_available(): self.device = torch.device("cpu") @@ -118,7 +121,7 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] :param column_pos: The start position of the albation :return: The albated input with an extra channel indicating the location of the ablation """ - assert x.shape[1] == 3 + assert x.shape[1] == self.expected_input_channels if column_pos is None: column_pos = random.randint(0, x.shape[3]) @@ -126,8 +129,10 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if isinstance(x, np.ndarray): x = torch.from_numpy(x).to(self.device) - ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) - x = torch.cat([x, ones], dim=1) + if self.add_ablation_mask: + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + x = self.ablate(x, column_pos=column_pos) if self.to_reshape: x = self.upsample(x) From d1857b42b6301049c9fba612283accf4994d252c Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 12 Jul 2023 14:44:06 +0100 Subject: [PATCH 27/55] updating dev testing file 
and tests for vit Signed-off-by: GiulioZizzo --- dev.py | 151 ++++++++++++------ .../certification/test_smooth_vit.py | 107 ++++++++++++- 2 files changed, 203 insertions(+), 55 deletions(-) diff --git a/dev.py b/dev.py index 984efe8a56..b31be5abca 100644 --- a/dev.py +++ b/dev.py @@ -78,59 +78,108 @@ def vit_dev(): art_model.eval_and_certify(x_test, y_test, size_to_certify=4) -def cnn_dev(): - class MNISTModel(torch.nn.Module): - - def __init__(self, number_of_classes: int): - super(MNISTModel, self).__init__() - - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - self.conv_1 = torch.nn.Conv2d(in_channels=2, - out_channels=64, - kernel_size=4, - stride=2, - padding=1) - - self.conv_2 = torch.nn.Conv2d(in_channels=64, - out_channels=128, - kernel_size=4, - stride=2, padding=1) - - self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) - self.fc2 = torch.nn.Linear(in_features=500, out_features=100) - self.fc3 = torch.nn.Linear(in_features=100, out_features=10) - - self.relu = torch.nn.ReLU() - - def forward(self, x: "torch.Tensor") -> "torch.Tensor": - """ - Computes the forward pass though the neural network - :param x: input data of shape (batch size, N features) - :return: model prediction - """ - x = self.relu(self.conv_1(x)) - x = self.relu(self.conv_2(x)) - x = torch.flatten(x, 1) - x = self.relu(self.fc1(x)) - x = self.relu(self.fc2(x)) - x = self.fc3(x) - return x - - model = MNISTModel(number_of_classes=10) +def cnn_dev(algo='salman2021'): + + assert algo in ['levine2020', 'salman2021'] + + if algo == 'salman2021': + class MNISTModel(torch.nn.Module): + + def __init__(self): + super(MNISTModel, self).__init__() + + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + self.conv_1 = torch.nn.Conv2d(in_channels=1, + out_channels=64, + kernel_size=4, + stride=2, + padding=1) + + self.conv_2 = torch.nn.Conv2d(in_channels=64, + out_channels=128, + kernel_size=4, + stride=2, padding=1) + + self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) + self.fc2 = torch.nn.Linear(in_features=500, out_features=100) + self.fc3 = torch.nn.Linear(in_features=100, out_features=10) + + self.relu = torch.nn.ReLU() + + def forward(self, x: "torch.Tensor") -> "torch.Tensor": + """ + Computes the forward pass though the neural network + :param x: input data of shape (batch size, N features) + :return: model prediction + """ + x = self.relu(self.conv_1(x)) + x = self.relu(self.conv_2(x)) + x = torch.flatten(x, 1) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x + else: + class MNISTModel(torch.nn.Module): + + def __init__(self): + super(MNISTModel, self).__init__() + + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + self.conv_1 = torch.nn.Conv2d(in_channels=2, + out_channels=64, + kernel_size=4, + stride=2, + padding=1) + + self.conv_2 = torch.nn.Conv2d(in_channels=64, + out_channels=128, + kernel_size=4, + stride=2, padding=1) + + self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) + self.fc2 = torch.nn.Linear(in_features=500, out_features=100) + self.fc3 = torch.nn.Linear(in_features=100, out_features=10) + + self.relu = torch.nn.ReLU() + + def forward(self, x: "torch.Tensor") -> "torch.Tensor": + x = self.relu(self.conv_1(x)) + x = self.relu(self.conv_2(x)) + x = torch.flatten(x, 1) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x + + model = MNISTModel() # (x_train, y_train), (x_test, y_test) = 
get_cifar_data() (x_train, y_train), (x_test, y_test) = get_mnist_data() optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005) - art_model = PyTorchDeRandomizedSmoothing(model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=optimizer, - input_shape=(1, 28, 28), - nb_classes=10, - ablation_type='column', - ablation_size=2, - threshold=0.3, - logits=True) + if algo == 'salman2021': + art_model = PyTorchDeRandomizedSmoothing(model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=optimizer, + input_shape=(1, 28, 28), + nb_classes=10, + ablation_type='column', + ablation_size=2, + algorithm=algo, + logits=True) + else: + art_model = PyTorchDeRandomizedSmoothing(model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=optimizer, + input_shape=(1, 28, 28), + nb_classes=10, + ablation_type='column', + ablation_size=2, + algorithm=algo, + threshold=0.3, + logits=True) scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[200], gamma=0.1) @@ -138,5 +187,5 @@ def forward(self, x: "torch.Tensor") -> "torch.Tensor": nb_epochs=400, scheduler=scheduler) -# vit_dev() -cnn_dev() \ No newline at end of file +vit_dev() +# cnn_dev() \ No newline at end of file diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 7649b0536f..557983b80e 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -186,6 +186,7 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 except ARTTestException as e: art_warning(e) +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ Assert implementations matches original with a forward pass through the same model architecture. @@ -197,6 +198,11 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 import sys from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + from pathlib import Path + import shutil + + # if os.path.exists('smoothed-vit'): + # shutil.rmtree('smoothed-vit') os.system("git clone https://github.com/MadryLab/smoothed-vit") sys.path.append('smoothed-vit/src/utils/') @@ -229,7 +235,7 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" - + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class MaskProcessor(torch.nn.Module): def __init__(self, patch_size=16): super().__init__() @@ -240,7 +246,7 @@ def forward(self, ones_mask): ones_mask = ones_mask[0].unsqueeze(0) # take the first mask ones_mask = self.avg_pool(ones_mask)[0] ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 - ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) ones_mask = ones_mask.expand(B, -1) return ones_mask @@ -311,7 +317,7 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: madry_vit.head = torch.nn.Linear(madry_vit.head.in_features, 10) madry_vit.load_state_dict(art_sd) - + madry_vit = madry_vit.to(device) col_ablator = ColumnAblator( ablation_size=4, channels_first=True, @@ -327,11 +333,104 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) +@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + With the forward pass equivalence asserted, we now confirm that the certification functions in the same + way by doing a full end to end prediction and certification test over the data. + """ + import torch + import os + import sys + from torch.utils.data import Dataset + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + class ArgClass: + def __init__(self): + self.certify_patch_size = 4 + self.certify_ablation_size = 4 + self.certify_stride = 1 + self.dataset = 'cifar10' + self.certify_out_dir = './' + self.exp_name = 'tests' + self.certify_mode = 'col' + self.batch_id = None + + class DataSet(Dataset): + def __init__(self, x, y): + self.x = x + self.y = y + + def __len__(self): + return len(self.y) + + def __getitem__(self, idx): + return self.x[idx], self.y[idx] + + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + import shutil + from torch.utils.data import DataLoader + + if os.path.exists('smoothed-vit'): + shutil.rmtree('smoothed-vit') + + if os.path.exists('tests'): + shutil.rmtree('tests') + + os.system("git clone https://github.com/MadryLab/smoothed-vit") + sys.path.append('smoothed-vit/src/utils/') + from smoothing import certify + + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 224, 224), + nb_classes=10, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) + + class WrappedModel(torch.nn.Module): + def __init__(self, my_model): + super().__init__() + self.model = my_model + + def forward(self, x): + x = self.model(x) + return x, 'filler_arg' + + cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:100]).to(device) + cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:100]).to(device) + upsample = torch.nn.Upsample(scale_factor=224 / 32) + cifar_data = upsample(cifar_data) + dataset = DataSet(cifar_data, cifar_labels) + validation_loader = DataLoader(dataset, batch_size=64) + args = ArgClass() + + model = WrappedModel(my_model=art_model.model) + certify(args=args, + model=model, + validation_loader=validation_loader, + store=None) + summary 
= torch.load('tests/m4_s4_summary.pth') + print('the summary is ', summary) + acc, cert_acc = art_model.eval_and_certify(x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), size_to_certify=4) + print('cert_acc ', cert_acc) + print('acc ', acc) + assert cert_acc == summary['cert_acc'] + assert acc == summary['smooth_acc'] + + @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") def test_equivalence(fix_get_cifar10_data): import torch from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class MadrylabImplementations: """ @@ -384,7 +483,7 @@ def forward(self, ones_mask): ones_mask = ones_mask[0].unsqueeze(0) # take the first mask ones_mask = self.avg_pool(ones_mask)[0] ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 - ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) + ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) ones_mask = ones_mask.expand(B, -1) return ones_mask From 07842a7413089ff107bd139684a737acfb0fc405 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 17 Jul 2023 13:51:22 +0000 Subject: [PATCH 28/55] refactor to eval_and_certify. Adding block ablations Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 148 +++++++++++++++++- .../derandomized_smoothing/pytorch.py | 77 +++++---- ...{smooth_vit.py => smooth_vit_to_remove.py} | 0 3 files changed, 197 insertions(+), 28 deletions(-) rename art/estimators/certification/derandomized_smoothing/vision_transformers/{smooth_vit.py => smooth_vit_to_remove.py} (100%) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index cf2bd9748c..cc3e8328ed 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -108,7 +108,7 @@ def __init__( def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: """ - Ablates the input colum wise + Ablates the input column wise :param x: Input data :param column_pos: The start position of the albation @@ -188,3 +188,149 @@ def certify(self, & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) return cert, cert_and_correct, top_predicted_class + + + +class BlockAblator(torch.nn.Module): + """ + Pure Pytorch implementation of stripe/column ablation. + """ + + def __init__( + self, + ablation_size: int, + channels_first: bool, + mode, + to_reshape: bool, + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + algorithm: str = 'salman2021', + device_type: str = "gpu", + ): + """ + Creates a column ablator + + :param ablation_size: The size of the column we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param to_reshape: If the input requires reshaping. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. 
+ """ + super().__init__() + + self.ablation_size = ablation_size + self.channels_first = channels_first + self.to_reshape = to_reshape + self.add_ablation_mask = False + self.additional_channels = False + self.algorithm = algorithm + self.original_shape = original_shape + + if self.algorithm == 'levine2020': + self.additional_channels = True + if self.algorithm == 'salman2021' and mode == 'ViT': + self.add_ablation_mask = True + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + if original_shape is not None and output_shape is not None: + self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + + def ablate(self, x: torch.Tensor, column_pos: int, row_pos: int) -> torch.Tensor: + """ + Ablates the input block wise + + :param x: Input data + :param column_pos: The start position of the albation + :param row_pos: The row start position of the albation + :return: The ablated input with 0s where the ablation occurred + """ + k = self.ablation_size + # Column ablations + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 + + # Row ablations + if row_pos + k > x.shape[-2]: + x[:, :, (row_pos + k) % x.shape[-2] : row_pos, :] = 0.0 + else: + x[:, :, :row_pos, :] = 0.0 + x[:, :, row_pos + k :, :] = 0.0 + return x + + def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos: Optional[int] = None) -> torch.Tensor: + """ + Forward pass though the ablator. We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :return: The albated input with an extra channel indicating the location of the ablation + """ + + if x.shape[1] != self.original_shape[0]: + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + if row_pos is None: + row_pos = random.randint(0, x.shape[2]) + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + if self.add_ablation_mask: + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + + if self.additional_channels: + x = torch.cat([x, 1.0 - x], dim=1) + + x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) + + if self.to_reshape: + x = self.upsample(x) + return x + + def certify(self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions + + :param pred_counts: The model predictions over the ablated data. + :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. 
+ """ + + if isinstance(pred_counts, np.ndarray): + pred_counts = torch.from_numpy(pred_counts).to(self.device) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label).to(self.device) + + num_of_classes = pred_counts.shape[-1] + + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1)**2 + + cert_and_correct = cert & (label == top_predicted_class) + + if self.algorithm == 'levine2020': + tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ + & (top_predicted_class < second_predicted_class) + cert = torch.logical_or(cert, tie_break_certs) + return cert, cert_and_correct, top_predicted_class \ No newline at end of file diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index cf6e787bea..695980d31f 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -215,21 +215,27 @@ def __init__( ) self.to_reshape = True - output_shape = model.default_cfg["input_size"] + output_shape = model.default_cfg["input_size"] # set the method back to avoid unexpected side effects later on should timm need to be reused. timm.models.vision_transformer._create_vision_transformer = tmp_func self.mode = "ViT" else: if isinstance(model, torch.nn.Module): - if algorithm == 'levine2020': - if ablation_type is None or threshold is None or logits is None: - raise ValueError( - "If using CNN please specify if the model returns logits, " - " the prediction threshold, and ablation type" - ) self.mode = "CNN" output_shape = input_shape + self.to_reshape = False + print('We are here!') + + elif algorithm == 'levine2020': + if ablation_type is None or threshold is None or logits is None: + raise ValueError( + "If using CNN please specify if the model returns logits, " + " the prediction threshold, and ablation type" + ) + self.mode = "CNN" + output_shape = input_shape + self.to_reshape = False if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): super().__init__( @@ -255,17 +261,32 @@ def __init__( if verbose: logger.info(self.model) - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator - self.ablator = ColumnAblator( - ablation_size=ablation_size, - channels_first=True, - to_reshape=self.to_reshape, - original_shape=input_shape, - output_shape=output_shape, - device_type=device_type, - algorithm=algorithm, - mode=self.mode, - ) + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator, BlockAblator + + if ablation_type == 'column': + self.ablator = ColumnAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=output_shape, + device_type=device_type, + algorithm=algorithm, + mode=self.mode, + ) + elif ablation_type == 'block': + self.ablator = BlockAblator( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=output_shape, + device_type=device_type, + algorithm=algorithm, + mode=self.mode, + ) + else: + raise ValueError(f"ablation_type of {ablation_type} not recognized. 
Must be either column or block") if self.mode is None: raise ValueError("Model type not recognized.") @@ -344,8 +365,7 @@ def fit( # pylint: disable=W0221 # Train for one epoch for m in pbar: - i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) - i_batch = self.ablator.forward(i_batch) + i_batch = self.ablator.forward(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])) if transform is not None and self.mode == "ViT": # VIT specific i_batch = transform(i_batch) @@ -442,8 +462,8 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - with torch.no_grad(): for _ in tqdm(range(nb_epochs)): for m in tqdm(range(num_batch)): - i_batch = np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) + i_batch = self.ablator.forward(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]), + column_pos=random.randint(0, x.shape[3])) _ = self.model(i_batch) def eval_and_certify( @@ -466,8 +486,6 @@ def eval_and_certify( :return: The accuracy and certified accuracy over the dataset """ import torch - if self.mode != 'ViT': # TODO, adapt for cnn first - raise ValueError('Accessing a ViT specific functionality while running in CNN mode') self.model.eval() y = check_and_transform_label_format(y, nb_classes=self.nb_classes) @@ -493,15 +511,20 @@ def eval_and_certify( i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] - predictions = [] pred_counts = np.zeros((len(i_batch), self.nb_classes)) for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction model_outputs = self.model(ablated_batch) - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 - predictions.append(model_outputs) + + if self.algorithm == 'levine2020': + if self.logits: + model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + model_outputs = model_outputs >= self.threshold + pred_counts += model_outputs.cpu().numpy() + else: + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py similarity index 100% rename from art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit.py rename to art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py From 44b2ea78e1724773b73c6e6989e825c4386af53b Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Thu, 20 Jul 2023 12:04:47 +0100 Subject: [PATCH 29/55] adapting tests for cpu only Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/pytorch.py | 3 +- .../certification/test_smooth_vit.py | 60 ++++++++++++++++--- 2 files changed, 52 insertions(+), 11 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 695980d31f..3e1232d669 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -77,13 +77,13 @@ def __init__( nb_classes: int, ablation_size: int, algorithm: str = 'salman2021', 
+ ablation_type: str = 'column', replace_last_layer: Optional[bool] = None, drop_tokens: bool = True, load_pretrained: bool = True, optimizer: Union[type, "torch.optim.Optimizer", None] = None, optimizer_params: Optional[dict] = None, channels_first: bool = True, - ablation_type: Optional[str] = None, threshold: Optional[float] = None, logits: Optional[bool] = True, clip_values: Optional["CLIP_VALUES_TYPE"] = None, @@ -514,7 +514,6 @@ def eval_and_certify( pred_counts = np.zeros((len(i_batch), self.nb_classes)) for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) - # Perform prediction model_outputs = self.model(ablated_batch) diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 557983b80e..996d6e675b 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -198,7 +198,6 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 import sys from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - from pathlib import Path import shutil # if os.path.exists('smoothed-vit'): @@ -342,6 +341,8 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa import torch import os import sys + import types + from torch.utils.data import Dataset device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') @@ -371,8 +372,8 @@ def __getitem__(self, idx): import shutil from torch.utils.data import DataLoader - if os.path.exists('smoothed-vit'): - shutil.rmtree('smoothed-vit') + # if os.path.exists('smoothed-vit'): + # shutil.rmtree('smoothed-vit') if os.path.exists('tests'): shutil.rmtree('tests') @@ -395,6 +396,9 @@ def __getitem__(self, idx): ) class WrappedModel(torch.nn.Module): + """ + Original implementation requires to return a tuple. We add a dummy return to satisfy this. + """ def __init__(self, my_model): super().__init__() self.model = my_model @@ -403,12 +407,50 @@ def forward(self, x): x = self.model(x) return x, 'filler_arg' - cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:100]).to(device) - cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:100]).to(device) + def _cuda(self): + return self + + class MyDataloader(Dataset): + """ + Original implementation made use of .cuda() without device checks. Thus, for cpu only machines + (such as those run for ART CI checks) the test will fail. Here we override .cuda() for the + instances to just return self. 
+ """ + def __init__(self, x, y): + self.x = x + self.y = y + self.bsize = 2 + + def __len__(self): + return 2 + + def __getitem__(self, idx): + if idx >= 2: + raise IndexError + else: + x = self.x[idx*self.bsize:idx*self.bsize+self.bsize] + y = self.y[idx*self.bsize:idx*self.bsize+self.bsize] + + x.cuda = types.MethodType(_cuda, x) + y.cuda = types.MethodType(_cuda, y) + return x, y + + if torch.cuda.is_available(): + num_to_fetch = 100 + else: + num_to_fetch = 4 + + cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) + cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) upsample = torch.nn.Upsample(scale_factor=224 / 32) cifar_data = upsample(cifar_data) - dataset = DataSet(cifar_data, cifar_labels) - validation_loader = DataLoader(dataset, batch_size=64) + + if torch.cuda.is_available(): + dataset = DataSet(cifar_data, cifar_labels) + validation_loader = DataLoader(dataset, batch_size=64) + else: + validation_loader = MyDataloader(cifar_data, cifar_labels) + args = ArgClass() model = WrappedModel(my_model=art_model.model) @@ -421,8 +463,8 @@ def forward(self, x): acc, cert_acc = art_model.eval_and_certify(x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), size_to_certify=4) print('cert_acc ', cert_acc) print('acc ', acc) - assert cert_acc == summary['cert_acc'] - assert acc == summary['smooth_acc'] + assert cert_acc == torch.tensor(summary['cert_acc']) + assert acc == torch.tensor(summary['smooth_acc']) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") From 877fb3875b0bfc0b97ba1fb85e2d4767198847fe Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 2 Aug 2023 11:40:50 +0000 Subject: [PATCH 30/55] address tiebreak in kthvalue vs argmax Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 19 ++++- .../derandomized_smoothing/pytorch.py | 44 ++++++++--- .../certification/test_smooth_vit.py | 77 +++++++++++++------ 3 files changed, 102 insertions(+), 38 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index cc3e8328ed..f3658afe05 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -176,6 +176,13 @@ def certify(self, num_of_classes = pred_counts.shape[-1] + # NB! argmax and kthvalue handle ties between predicted counts differently. + # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L98 + # uses argmax for the model predictions (later called https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # and kthvalue for the certified predictions. + # to be consistent with the original implementation we also follow this here. 
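        # A minimal, self-contained sketch (illustrative only, not taken from this patch) of
        # why both selection rules are kept: when two classes receive identical ablation
        # counts, torch.argmax and Tensor.kthvalue may pick different tied indices, so the
        # prediction path (argmax) and the certification path (kthvalue) each mirror the
        # reference implementation rather than being unified.
        #
        #     import torch
        #     counts = torch.tensor([[5.0, 7.0, 7.0, 1.0]])               # classes 1 and 2 tie
        #     via_argmax = torch.argmax(counts, dim=1)                    # one of the tied indices
        #     _, via_kthvalue = counts.kthvalue(counts.shape[-1], dim=1)  # possibly the other
        #     # The two index tensors are not guaranteed to match, so accuracy numbers only
        #     # reproduce the original repository when the same rule is used in the same place.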
+ top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) @@ -187,8 +194,7 @@ def certify(self, tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) - return cert, cert_and_correct, top_predicted_class - + return cert, cert_and_correct, top_predicted_class_argmax class BlockAblator(torch.nn.Module): @@ -320,6 +326,13 @@ def certify(self, if isinstance(label, np.ndarray): label = torch.from_numpy(label).to(self.device) + # NB! argmax and kthvalue handle ties between predicted counts differently. + # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L145 + # uses argmax for the model predictions (later called https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # and kthvalue for the certified predictions. + # to be consistent with the original implementation we also follow this here. + top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) + num_of_classes = pred_counts.shape[-1] top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) @@ -333,4 +346,4 @@ def certify(self, tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) - return cert, cert_and_correct, top_predicted_class \ No newline at end of file + return cert, cert_and_correct, top_predicted_class_argmax \ No newline at end of file diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 3e1232d669..ea8c0a81e8 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -258,6 +258,8 @@ def __init__( self.logits = logits self.ablation_size = (ablation_size,) self.algorithm = algorithm + self.ablation_type = ablation_type + if verbose: logger.info(self.model) @@ -512,18 +514,36 @@ def eval_and_certify( o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] pred_counts = np.zeros((len(i_batch), self.nb_classes)) - for pos in range(i_batch.shape[-1]): - ablated_batch = self.ablator.forward(i_batch, column_pos=pos) - # Perform prediction - model_outputs = self.model(ablated_batch) - - if self.algorithm == 'levine2020': - if self.logits: - model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) - model_outputs = model_outputs >= self.threshold - pred_counts += model_outputs.cpu().numpy() - else: - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + if self.ablation_type == 'column': + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + # Perform prediction + model_outputs = self.model(ablated_batch) + + if self.algorithm == 'levine2020': + if self.logits: + model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + model_outputs = model_outputs >= self.threshold + pred_counts += model_outputs.cpu().numpy() + else: + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: + for column_pos in range(i_batch.shape[-1]): + for 
row_pos in range(i_batch.shape[-2]): + ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) + model_outputs = self.model(ablated_batch) + if self.algorithm == 'levine2020': + if self.logits: + model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + model_outputs = model_outputs >= self.threshold + pred_counts += model_outputs.cpu().numpy() + else: + + # model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + # model_outputs = model_outputs >= 0.3 + # pred_counts += model_outputs.cpu().numpy() + + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 996d6e675b..f4c6ac1be3 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -187,7 +187,8 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 art_warning(e) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") -def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data): +@pytest.mark.parametrize("ablation", ["block", "column"]) +def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): """ Assert implementations matches original with a forward pass through the same model architecture. Note, there are some differences in architecture between the same model names. @@ -252,7 +253,7 @@ def forward(self, ones_mask): from custom_models import preprocess preprocess.MaskProcessor = MaskProcessor - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator, BlockAblator from custom_models.vision_transformer import vit_small_patch16_224, vit_base_patch16_224 cifar_data = fix_get_cifar10_data[0][:50] @@ -317,23 +318,34 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: madry_vit.load_state_dict(art_sd) madry_vit = madry_vit.to(device) - col_ablator = ColumnAblator( - ablation_size=4, - channels_first=True, - to_reshape=True, - mode='ViT', - original_shape=(3, 32, 32), - output_shape=(3, 224, 224), - ) - - ablated = col_ablator.forward(cifar_data, column_pos=10) + if ablation == 'column': + ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode='ViT', + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + ablated = ablator.forward(cifar_data, column_pos=10) + elif ablation == 'block': + ablator = BlockAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + mode='ViT', + ) + ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) madry_preds = madry_vit(ablated) art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") -def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data): +@pytest.mark.parametrize("ablation", ["block", "column"]) +def test_certification_equivalence(art_warning, fix_get_mnist_data, 
fix_get_cifar10_data, ablation): """ With the forward pass equivalence asserted, we now confirm that the certification functions in the same way by doing a full end to end prediction and certification test over the data. @@ -354,7 +366,10 @@ def __init__(self): self.dataset = 'cifar10' self.certify_out_dir = './' self.exp_name = 'tests' - self.certify_mode = 'col' + if ablation == 'column': + self.certify_mode = 'col' + if ablation == 'block': + self.certify_mode = 'block' self.batch_id = None class DataSet(Dataset): @@ -387,13 +402,17 @@ def __getitem__(self, idx): loss=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.SGD, optimizer_params={"lr": 0.01}, - input_shape=(3, 224, 224), + # input_shape=(3, 224, 224), + input_shape=(3, 32, 32), nb_classes=10, + ablation_type=ablation, ablation_size=4, load_pretrained=True, replace_last_layer=True, verbose=False, ) + if os.path.isfile('vit_small_patch16_224_block.pt'): + art_model.model.load_state_dict(torch.load('vit_small_patch16_224_block.pt')) class WrappedModel(torch.nn.Module): """ @@ -402,8 +421,11 @@ class WrappedModel(torch.nn.Module): def __init__(self, my_model): super().__init__() self.model = my_model + self.upsample = torch.nn.Upsample(scale_factor=224 / 32) def forward(self, x): + if x.shape[-1] != 224: + x = self.upsample(x) x = self.model(x) return x, 'filler_arg' @@ -442,12 +464,12 @@ def __getitem__(self, idx): cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) - upsample = torch.nn.Upsample(scale_factor=224 / 32) - cifar_data = upsample(cifar_data) + # upsample = torch.nn.Upsample(scale_factor=224 / 32) + # cifar_data = upsample(cifar_data) if torch.cuda.is_available(): dataset = DataSet(cifar_data, cifar_labels) - validation_loader = DataLoader(dataset, batch_size=64) + validation_loader = DataLoader(dataset, batch_size=num_to_fetch) else: validation_loader = MyDataloader(cifar_data, cifar_labels) @@ -460,11 +482,20 @@ def __getitem__(self, idx): store=None) summary = torch.load('tests/m4_s4_summary.pth') print('the summary is ', summary) - acc, cert_acc = art_model.eval_and_certify(x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), size_to_certify=4) - print('cert_acc ', cert_acc) - print('acc ', acc) - assert cert_acc == torch.tensor(summary['cert_acc']) - assert acc == torch.tensor(summary['smooth_acc']) + acc, cert_acc = art_model.eval_and_certify(x=cifar_data.cpu().numpy(), + y=cifar_labels.cpu().numpy(), + batch_size=num_to_fetch, + size_to_certify=4) + + assert torch.allclose(torch.tensor(cert_acc), torch.tensor(summary['cert_acc'])) + assert torch.tensor(acc) == torch.tensor(summary['smooth_acc']) + + upsample = torch.nn.Upsample(scale_factor=224 / 32) + cifar_data = upsample(cifar_data) + acc_non_ablation = art_model.model(cifar_data) + acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) + print('acc non ablation ', acc_non_ablation) + assert np.allclose(acc_non_ablation.astype(float), summary['acc']) @pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") From bac6ff4c0c4c22f584a7bc48b3c2a06c49cbb4e6 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 2 Aug 2023 16:15:20 +0000 Subject: [PATCH 31/55] updating for derandomised smoothing tests Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 12 +++++++++--- .../derandomized_smoothing/pytorch.py | 16 +++++----------- 
.../certification/test_derandomized_smoothing.py | 12 +++++++----- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index f3658afe05..7456cb50ff 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -131,7 +131,7 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] :return: The albated input with an extra channel indicating the location of the ablation """ - if x.shape[1] != self.original_shape[0]: + if x.shape[1] != self.original_shape[0] and self.algorithm == 'salman2021': raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") if column_pos is None: @@ -147,6 +147,9 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) + if x.shape[1] != self.original_shape[0] and self.additional_channels: + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + x = self.ablate(x, column_pos=column_pos) if self.to_reshape: @@ -280,7 +283,7 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] :return: The albated input with an extra channel indicating the location of the ablation """ - if x.shape[1] != self.original_shape[0]: + if x.shape[1] != self.original_shape[0] and self.algorithm == 'salman2021': raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") if column_pos is None: @@ -299,6 +302,9 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) + if x.shape[1] != self.original_shape[0] and self.additional_channels: + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. 
Recived shape of {x.shape[1]}") + x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) if self.to_reshape: @@ -343,7 +349,7 @@ def certify(self, cert_and_correct = cert & (label == top_predicted_class) if self.algorithm == 'levine2020': - tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ + tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1)**2)\ & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) return cert, cert_and_correct, top_predicted_class_argmax \ No newline at end of file diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index ea8c0a81e8..a1af26d147 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -435,15 +435,6 @@ def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndar return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) - ''' - def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: - if self.mode == "ViT": - return PyTorchClassifier.predict(self, x, batch_size, training_mode, **kwargs) - if self.mode == "CNN": - return PyTorchDeRandomizedSmoothingCNN.predict(self, x, batch_size, training_mode, **kwargs) - raise ValueError('mode is not ViT or CNN') - ''' - def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: """ Method to update the batchnorm of a neural network on small datasets when it was pre-trained @@ -560,7 +551,9 @@ def eval_and_certify( def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: import torch - x = x.astype(ART_NUMPY_DTYPE) + if isinstance(x, torch.Tensor): + x = x.cpu().numpy() + outputs = PyTorchClassifier.predict(self, x=x, batch_size=batch_size, training_mode=training_mode, **kwargs) if self.algorithm == 'levine2020': @@ -571,7 +564,8 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo ) return outputs - def predict(self, x, batch_size, training_mode, **kwargs): + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + if self._channels_first: columns_in_data = x.shape[-1] rows_in_data = x.shape[-2] diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index 1c93dfec9e..3cb2037d4a 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -126,7 +126,7 @@ def forward(self, x): criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(ptc.parameters(), lr=0.01, momentum=0.9) try: - for ablation_type in ["column", "row", "block"]: + for ablation_type in ["column", "block"]: classifier = PyTorchDeRandomizedSmoothing( model=ptc, clip_values=(0, 1), @@ -137,6 +137,7 @@ def forward(self, x): ablation_type=ablation_type, ablation_size=5, threshold=0.3, + algorithm='levine2020', logits=True, ) classifier.fit(x=dataset[0], y=dataset[1], nb_epochs=1) @@ -267,16 +268,17 @@ def load_weights(self): ablation_type=ablation_type, ablation_size=ablation_size, threshold=0.3, + algorithm='levine2020', logits=True, ) preds = classifier.predict(np.copy(fix_get_mnist_data[0])) - num_certified = 
classifier.ablator.certify(preds, size_to_certify=size_to_certify) - + cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify(preds, + size_to_certify=size_to_certify) if ablation_type == "column": - assert np.sum(num_certified) == 52 + assert np.sum(cert.cpu().numpy()) == 52 else: - assert np.sum(num_certified) == 22 + assert np.sum(cert.cpu().numpy()) == 22 except ARTTestException as e: art_warning(e) From d3e1d71bec182076e9fdad1eb5954b345fe73035 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 2 Aug 2023 16:17:09 +0000 Subject: [PATCH 32/55] black formatting. Removal of legacy code Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 94 ++- .../derandomized_smoothing/pytorch.py | 56 +- .../smooth_vit_to_remove.py | 169 ---- .../__init__.py | 4 - .../pytorch.py | 720 ------------------ .../smooth_vit.py | 150 ---- 6 files changed, 92 insertions(+), 1101 deletions(-) delete mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py delete mode 100644 art/estimators/certification/smoothed_vision_transformers_old/__init__.py delete mode 100644 art/estimators/certification/smoothed_vision_transformers_old/pytorch.py delete mode 100644 art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index 7456cb50ff..68500a2514 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -70,7 +70,7 @@ def __init__( to_reshape: bool, original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, - algorithm: str = 'salman2021', + algorithm: str = "salman2021", device_type: str = "gpu", ): """ @@ -92,9 +92,9 @@ def __init__( self.algorithm = algorithm self.original_shape = original_shape - if self.algorithm == 'levine2020': + if self.algorithm == "levine2020": self.additional_channels = True - if self.algorithm == 'salman2021' and mode == 'ViT': + if self.algorithm == "salman2021" and mode == "ViT": self.add_ablation_mask = True if device_type == "cpu" or not torch.cuda.is_available(): @@ -122,16 +122,21 @@ def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: x[:, :, :, column_pos + k :] = 0.0 return x - def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None) -> torch.Tensor: + def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos=None) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. :param x: Input data :param column_pos: The start position of the albation + :param row_pos: Unused. :return: The albated input with an extra channel indicating the location of the ablation """ - if x.shape[1] != self.original_shape[0] and self.algorithm == 'salman2021': + if ( + self.original_shape is not None + and x.shape[1] != self.original_shape[0] + and self.algorithm == "salman2021" + ): raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. 
Recived shape of {x.shape[1]}") if column_pos is None: @@ -147,19 +152,25 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) - if x.shape[1] != self.original_shape[0] and self.additional_channels: + if ( + self.original_shape is not None + and x.shape[1] != self.original_shape[0] + and self.additional_channels + ): raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") - x = self.ablate(x, column_pos=column_pos) + ablated_x = self.ablate(x, column_pos=column_pos) if self.to_reshape: - x = self.upsample(x) - return x + ablated_x = self.upsample(ablated_x) + return ablated_x - def certify(self, - pred_counts: Union[torch.Tensor, np.ndarray], - size_to_certify: int, - label: Union[torch.Tensor, np.ndarray] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + def certify( + self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions @@ -193,9 +204,10 @@ def certify(self, cert_and_correct = cert & (label == top_predicted_class) - if self.algorithm == 'levine2020': - tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1))\ - & (top_predicted_class < second_predicted_class) + if self.algorithm == "levine2020": + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) + ) & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) return cert, cert_and_correct, top_predicted_class_argmax @@ -213,7 +225,7 @@ def __init__( to_reshape: bool, original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, - algorithm: str = 'salman2021', + algorithm: str = "salman2021", device_type: str = "gpu", ): """ @@ -235,9 +247,9 @@ def __init__( self.algorithm = algorithm self.original_shape = original_shape - if self.algorithm == 'levine2020': + if self.algorithm == "levine2020": self.additional_channels = True - if self.algorithm == 'salman2021' and mode == 'ViT': + if self.algorithm == "salman2021" and mode == "ViT": self.add_ablation_mask = True if device_type == "cpu" or not torch.cuda.is_available(): @@ -274,7 +286,9 @@ def ablate(self, x: torch.Tensor, column_pos: int, row_pos: int) -> torch.Tensor x[:, :, row_pos + k :, :] = 0.0 return x - def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos: Optional[int] = None) -> torch.Tensor: + def forward( + self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos: Optional[int] = None + ) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. @@ -282,8 +296,11 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] :param column_pos: The start position of the albation :return: The albated input with an extra channel indicating the location of the ablation """ - - if x.shape[1] != self.original_shape[0] and self.algorithm == 'salman2021': + if ( + self.original_shape is not None + and x.shape[1] != self.original_shape[0] + and self.algorithm == "salman2021" + ): raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. 
Recived shape of {x.shape[1]}") if column_pos is None: @@ -302,19 +319,25 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) - if x.shape[1] != self.original_shape[0] and self.additional_channels: + if ( + self.original_shape is not None + and x.shape[1] != self.original_shape[0] + and self.additional_channels + ): raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") - x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) + ablated_x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) if self.to_reshape: - x = self.upsample(x) - return x + ablated_x = self.upsample(ablated_x) + return ablated_x - def certify(self, - pred_counts: Union[torch.Tensor, np.ndarray], - size_to_certify: int, - label: Union[torch.Tensor, np.ndarray] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + def certify( + self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions @@ -344,12 +367,13 @@ def certify(self, top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) - cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1)**2 + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) ** 2 cert_and_correct = cert & (label == top_predicted_class) - if self.algorithm == 'levine2020': - tie_break_certs = ((top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1)**2)\ - & (top_predicted_class < second_predicted_class) + if self.algorithm == "levine2020": + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ** 2 + ) & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) - return cert, cert_and_correct, top_predicted_class_argmax \ No newline at end of file + return cert, cert_and_correct, top_predicted_class_argmax diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index a1af26d147..66a4c5c3bc 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -76,8 +76,8 @@ def __init__( input_shape: Tuple[int, ...], nb_classes: int, ablation_size: int, - algorithm: str = 'salman2021', - ablation_type: str = 'column', + algorithm: str = "salman2021", + ablation_type: str = "column", replace_last_layer: Optional[bool] = None, drop_tokens: bool = True, load_pretrained: bool = True, @@ -133,10 +133,11 @@ def __init__( """ import torch + print(algorithm) self.mode = None - if importlib.util.find_spec("timm") is not None and algorithm == 'salman2021': + if importlib.util.find_spec("timm") is not None and algorithm == "salman2021": from timm.models.vision_transformer import VisionTransformer if isinstance(model, (VisionTransformer, str)): @@ -225,14 +226,14 @@ def __init__( self.mode = "CNN" output_shape = input_shape self.to_reshape = False - print('We are here!') + print("We are here!") - elif algorithm == 'levine2020': + elif algorithm == "levine2020": if ablation_type is None or threshold is 
None or logits is None: raise ValueError( "If using CNN please specify if the model returns logits, " " the prediction threshold, and ablation type" - ) + ) self.mode = "CNN" output_shape = input_shape self.to_reshape = False @@ -259,13 +260,18 @@ def __init__( self.ablation_size = (ablation_size,) self.algorithm = algorithm self.ablation_type = ablation_type - if verbose: logger.info(self.model) - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator, BlockAblator + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ( + ColumnAblator, + BlockAblator, + ) + + if TYPE_CHECKING: + self.ablator: Union[ColumnAblator, BlockAblator] - if ablation_type == 'column': + if ablation_type == "column": self.ablator = ColumnAblator( ablation_size=ablation_size, channels_first=True, @@ -276,7 +282,7 @@ def __init__( algorithm=algorithm, mode=self.mode, ) - elif ablation_type == 'block': + elif ablation_type == "block": self.ablator = BlockAblator( ablation_size=ablation_size, channels_first=True, @@ -444,8 +450,9 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - :param nb_epochs: How many times to forward pass over the input data """ import torch - if self.mode != 'ViT': - raise ValueError('Accessing a ViT specific functionality while running in CNN mode') + + if self.mode != "ViT": + raise ValueError("Accessing a ViT specific functionality while running in CNN mode") self.model.train() @@ -455,8 +462,9 @@ def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) - with torch.no_grad(): for _ in tqdm(range(nb_epochs)): for m in tqdm(range(num_batch)): - i_batch = self.ablator.forward(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]), - column_pos=random.randint(0, x.shape[3])) + i_batch = self.ablator.forward( + np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]), column_pos=random.randint(0, x.shape[3]) + ) _ = self.model(i_batch) def eval_and_certify( @@ -505,13 +513,13 @@ def eval_and_certify( o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] pred_counts = np.zeros((len(i_batch), self.nb_classes)) - if self.ablation_type == 'column': + if self.ablation_type == "column": for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction model_outputs = self.model(ablated_batch) - if self.algorithm == 'levine2020': + if self.algorithm == "levine2020": if self.logits: model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) model_outputs = model_outputs >= self.threshold @@ -523,13 +531,12 @@ def eval_and_certify( for row_pos in range(i_batch.shape[-2]): ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) model_outputs = self.model(ablated_batch) - if self.algorithm == 'levine2020': + if self.algorithm == "levine2020": if self.logits: model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) model_outputs = model_outputs >= self.threshold pred_counts += model_outputs.cpu().numpy() else: - # model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) # model_outputs = model_outputs >= 0.3 # pred_counts += model_outputs.cpu().numpy() @@ -548,15 +555,19 @@ def eval_and_certify( return (accuracy / n_samples), (cert_sum / n_samples) - def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: + def _predict_classifier( + self, x: Union[np.ndarray, "torch.Tensor"], batch_size: int, 
training_mode: bool, **kwargs + ) -> np.ndarray: import torch if isinstance(x, torch.Tensor): - x = x.cpu().numpy() + x_numpy = x.cpu().numpy() - outputs = PyTorchClassifier.predict(self, x=x, batch_size=batch_size, training_mode=training_mode, **kwargs) + outputs = PyTorchClassifier.predict( + self, x=x_numpy, batch_size=batch_size, training_mode=training_mode, **kwargs + ) - if self.algorithm == 'levine2020': + if self.algorithm == "levine2020": if not self.logits: return np.asarray((outputs >= self.threshold)) return np.asarray( @@ -565,7 +576,6 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo return outputs def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: - if self._channels_first: columns_in_data = x.shape[-1] rows_in_data = x.shape[-2] diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py deleted file mode 100644 index b29115af23..0000000000 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/smooth_vit_to_remove.py +++ /dev/null @@ -1,169 +0,0 @@ -# MIT License -# -# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -# Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -""" -This module implements Certified Patch Robustness via Smoothed Vision Transformers - -| Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - -| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf -""" - -from typing import Optional, Union, Tuple -import random - -import numpy as np -import torch - - -class UpSampler(torch.nn.Module): - """ - Resizes datasets to the specified size. - Usually for upscaling datasets like CIFAR to Imagenet format - """ - - def __init__(self, input_size: int, final_size: int) -> None: - """ - Creates an upsampler to make the supplied data match the pre-trained ViT format - - :param input_size: Size of the current input data - :param final_size: Desired final size - """ - super().__init__() - self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Forward pass though the upsampler. 
- - :param x: Input data - :return: The upsampled input data - """ - return self.upsample(x) - - -class ColumnAblator(torch.nn.Module): - """ - Pure Pytorch implementation of stripe/column ablation. - """ - - def __init__( - self, - ablation_size: int, - channels_first: bool, - to_reshape: bool = False, - original_shape: Optional[Tuple] = None, - output_shape: Optional[Tuple] = None, - add_ablation_mask: bool = True, - device_type: str = "gpu", - ): - """ - Creates a column ablator - - :param ablation_size: The size of the column we will retain. - :param channels_first: If the input is in channels first format. Currently required to be True. - :param to_reshape: If the input requires reshaping. - :param original_shape: Original shape of the input. - :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. - """ - super().__init__() - self.ablation_size = ablation_size - self.channels_first = channels_first - self.to_reshape = to_reshape - self.expected_input_channels = 1 - self.add_ablation_mask = add_ablation_mask - - if device_type == "cpu" or not torch.cuda.is_available(): - self.device = torch.device("cpu") - else: # pragma: no cover - cuda_idx = torch.cuda.current_device() - self.device = torch.device(f"cuda:{cuda_idx}") - - if original_shape is not None and output_shape is not None: - self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) - - def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: - """ - Ablates the input colum wise - - :param x: Input data - :param column_pos: The start position of the albation - :return: The ablated input with 0s where the ablation occurred - """ - k = self.ablation_size - if column_pos + k > x.shape[-1]: - x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 - else: - x[:, :, :, :column_pos] = 0.0 - x[:, :, :, column_pos + k :] = 0.0 - return x - - def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None) -> torch.Tensor: - """ - Forward pass though the ablator. We insert a new channel to keep track of the ablation location. - - :param x: Input data - :param column_pos: The start position of the albation - :return: The albated input with an extra channel indicating the location of the ablation - """ - assert x.shape[1] == self.expected_input_channels - - if column_pos is None: - column_pos = random.randint(0, x.shape[3]) - - if isinstance(x, np.ndarray): - x = torch.from_numpy(x).to(self.device) - - if self.add_ablation_mask: - ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) - x = torch.cat([x, ones], dim=1) - - x = self.ablate(x, column_pos=column_pos) - if self.to_reshape: - x = self.upsample(x) - return x - - def certify( - self, pred_counts: Union[torch.Tensor, np.ndarray], size_to_certify: int, label: Union[torch.Tensor, np.ndarray] - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Performs certification of the predictions - - :param pred_counts: The model predictions over the ablated data. - :param size_to_certify: The patch size we wish to check certification against - :param label: The ground truth labels - :return: A tuple consisting of: the certified predictions, - the predictions which were certified and also correct, - and the most predicted class across the different ablations on the input. 
- """ - if isinstance(pred_counts, np.ndarray): - pred_counts = torch.from_numpy(pred_counts).to(self.device) - - if isinstance(label, np.ndarray): - label = torch.from_numpy(label).to(self.device) - - num_of_classes = pred_counts.shape[-1] - - top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) - second_class_counts, _ = pred_counts.kthvalue(num_of_classes - 1, dim=1) - - cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - - cert_and_correct = cert & (label == top_predicted_class) - - return cert, cert_and_correct, top_predicted_class diff --git a/art/estimators/certification/smoothed_vision_transformers_old/__init__.py b/art/estimators/certification/smoothed_vision_transformers_old/__init__.py deleted file mode 100644 index 5791128b5e..0000000000 --- a/art/estimators/certification/smoothed_vision_transformers_old/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Smoothed ViT estimators. -""" -from art.estimators.certification.smoothed_vision_transformers.pytorch import PyTorchSmoothedViT diff --git a/art/estimators/certification/smoothed_vision_transformers_old/pytorch.py b/art/estimators/certification/smoothed_vision_transformers_old/pytorch.py deleted file mode 100644 index c4ba0ed050..0000000000 --- a/art/estimators/certification/smoothed_vision_transformers_old/pytorch.py +++ /dev/null @@ -1,720 +0,0 @@ -# MIT License -# -# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -# Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-""" -This module implements Certified Patch Robustness via Smoothed Vision Transformers - -| Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - -| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import logging -from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING -import random - -import numpy as np -from timm.models.vision_transformer import VisionTransformer -import torch -from tqdm import tqdm - -from art.estimators.classification.pytorch import PyTorchClassifier -from art.estimators.certification.smoothed_vision_transformers.smooth_vit import ColumnAblator -from art.utils import check_and_transform_label_format - -if TYPE_CHECKING: - import torchvision - from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE - from art.defences.preprocessor import Preprocessor - from art.defences.postprocessor import Postprocessor - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class PatchEmbed(torch.nn.Module): - """ - Image to Patch Embedding - - Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit - - Original License: - - MIT License - - Copyright (c) 2021 Madry Lab - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE - """ - - def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): - """ - Specifies the configuration for the convolutional layer. - - :param patch_size: The patch size used by the ViT. - :param in_channels: Number of input channels. - :param embed_dim: The embedding dimension used by the ViT. - """ - super().__init__() - self.patch_size = patch_size - self.in_channels = in_channels - self.embed_dim = embed_dim - self.proj: Optional[torch.nn.Conv2d] = None - - def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: # pylint: disable=W0613 - """ - Creates a convolution that mimics the embedding layer to be used for the ablation mask to - track where the image was ablated. - - :param patch_size: The patch size used by the ViT - :param embed_dim: The embedding dimension used by the ViT - :param device: Which device to set the emdedding layer to. - :param kwargs: Handles the remaining kwargs from the ViT configuration. 
- """ - - if patch_size is not None: - self.patch_size = patch_size - if embed_dim is not None: - self.embed_dim = embed_dim - - self.proj = torch.nn.Conv2d( - in_channels=self.in_channels, - out_channels=self.embed_dim, - kernel_size=self.patch_size, - stride=self.patch_size, - bias=False, - ) - w_shape = self.proj.weight.shape - self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Forward pass through the embedder. We are simply tracking the positions of the ablation mask so no gradients - are required. - - :param x: Input data corresponding to the ablation mask - :return: The embedded input - """ - if self.proj is not None: - with torch.no_grad(): - x = self.proj(x).flatten(2).transpose(1, 2) - return x - raise ValueError("Projection layer not yet created.") - - -class ArtViT(VisionTransformer): - """ - Art class inheriting from VisionTransformer to control the forward pass of the ViT. - """ - - # Make as a class attribute to avoid being included in the - # state dictionaries of the ViT Model. - ablation_mask_embedder = PatchEmbed(in_channels=1) - - def __init__(self, **kwargs): - """ - Create a ArtViT instance - :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class - Must contain ... - """ - self.to_drop_tokens = kwargs["drop_tokens"] - - if kwargs["device_type"] == "cpu" or not torch.cuda.is_available(): - self.device = torch.device("cpu") - else: # pragma: no cover - cuda_idx = torch.cuda.current_device() - self.device = torch.device(f"cuda:{cuda_idx}") - - del kwargs["drop_tokens"] - del kwargs["device_type"] - - super().__init__(**kwargs) - self.ablation_mask_embedder.create(device=self.device, **kwargs) - - self.in_chans = kwargs["in_chans"] - self.img_size = kwargs["img_size"] - - @staticmethod - def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: - """ - Drops the tokens which correspond to fully masked inputs - - :param x: Input data - :param indexes: positions to be ablated - :return: Input with tokens dropped where the input was fully ablated. - """ - x_no_cl, cls_token = x[:, 1:], x[:, 0:1] - shape = x_no_cl.shape - - # reshape to temporarily remove batch - x_no_cl = torch.reshape(x_no_cl, shape=(-1, shape[-1])) - indexes = torch.reshape(indexes, shape=(-1,)) - indexes = indexes.nonzero(as_tuple=True)[0] - x_no_cl = torch.index_select(x_no_cl, dim=0, index=indexes) - x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) - return torch.cat((cls_token, x_no_cl), dim=1) - - def forward_features(self, x: torch.Tensor) -> torch.Tensor: - """ - The forward pass of the ViT. - - :param x: Input data. 
- :return: The input processed by the ViT backbone - """ - - ablated_input = False - if x.shape[1] == self.in_chans + 1: - ablated_input = True - - if ablated_input: - x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] - - x = self.patch_embed(x) - x = self._pos_embed(x) - - if self.to_drop_tokens and ablated_input: - ones = self.ablation_mask_embedder(ablation_mask) - to_drop = torch.sum(ones, dim=2) - indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) - x = self.drop_tokens(x, indexes) - - x = self.norm_pre(x) - x = self.blocks(x) - return self.norm(x) - - -class PyTorchSmoothedViT(PyTorchClassifier): - """ - Implementation of Certified Patch Robustness via Smoothed Vision Transformers - - | Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - - | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf - """ - - def __init__( - self, - model: Union[VisionTransformer, str], - loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], - nb_classes: int, - ablation_size: int, - replace_last_layer: bool, - drop_tokens: bool = True, - load_pretrained: bool = True, - optimizer: Union[type, "torch.optim.Optimizer", None] = None, - optimizer_params: Optional[dict] = None, - channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, - preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - device_type: str = "gpu", - verbose: bool = True, - ): - """ - Create a smoothed ViT classifier. - - :param model: Either a string specifying which ViT architecture to load, or a vision transformer already - created with the Pytorch Image Models (timm) library. - :param loss: The loss function for which to compute gradients for training. The target label must be raw - categorical, i.e. not converted to one-hot encoding. - :param input_shape: The shape of one input instance. - :param nb_classes: The number of classes of the model. - :param ablation_size: The size of the data portion to retain after ablation. - :param replace_last_layer: If to replace the last layer of the ViT with a fresh layer matching the number - of classes for the dataset to be examined. Needed if going from the pre-trained - imagenet models to fine-tune on a dataset like CIFAR. - :param drop_tokens: If to drop the fully ablated tokens in the ViT - :param load_pretrained: If to load a pretrained model matching the ViT name. Will only affect the ViT if a - string name is passed to model rather than a ViT directly. - :param optimizer: The optimizer used to train the classifier. - :param channels_first: Set channels first or last. - :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and - maximum values allowed for features. If floats are provided, these will be used as the range of all - features. If arrays are provided, each value will be considered the bound for a feature, thus - the shape of clip values needs to match the total number of features. - :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. - :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier. 
- :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be - used for data preprocessing. The first value will be subtracted from the input. The input will then - be divided by the second one. - :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. - """ - import timm - - # temporarily assign the original method to tmp_func - tmp_func = timm.models.vision_transformer._create_vision_transformer - - # overrride with ART's ViT creation function - timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer - if isinstance(model, str): - model = timm.create_model( - model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type - ) - if replace_last_layer: - model.head = torch.nn.Linear(model.head.in_features, nb_classes) - if isinstance(optimizer, type): - if optimizer_params is not None: - optimizer = optimizer(model.parameters(), **optimizer_params) - else: - raise ValueError("If providing an optimiser please also supply its parameters") - - elif isinstance(model, VisionTransformer): - pretrained_cfg = model.pretrained_cfg - supplied_state_dict = model.state_dict() - supported_models = self.get_models() - if pretrained_cfg["architecture"] not in supported_models: - raise ValueError( - "Architecture not supported. Use PyTorchSmoothedViT.get_models() " - "to get the supported model architectures." - ) - model = timm.create_model(pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type) - model.load_state_dict(supplied_state_dict) - if replace_last_layer: - model.head = torch.nn.Linear(model.head.in_features, nb_classes) - - if optimizer is not None: - if not isinstance(optimizer, torch.optim.Optimizer): - raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") - - converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] - opt_state_dict = optimizer.state_dict() - if isinstance(optimizer, torch.optim.Adam): - logging.info("Converting Adam Optimiser") - converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) - elif isinstance(optimizer, torch.optim.SGD): - logging.info("Converting SGD Optimiser") - converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) - else: - raise ValueError("Optimiser not supported for conversion") - converted_optimizer.load_state_dict(opt_state_dict) - - self.to_reshape = False - if not isinstance(model, ArtViT): - raise ValueError("Vision transformer is not of ArtViT. Error occurred in ArtViT creation.") - - if model.default_cfg["input_size"][0] != input_shape[0]: - raise ValueError( - f'ViT requires {model.default_cfg["input_size"][0]} channel input,' - f" but {input_shape[0]} channels were provided." - ) - - if model.default_cfg["input_size"] != input_shape: - if verbose: - logger.warning( - f"ViT expects input shape of {model.default_cfg['input_size']}, " - f"but {input_shape} specified as the input shape. 
" - f"The input will be rescaled to {model.default_cfg['input_size']}" - ) - self.to_reshape = True - - if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ) - else: - raise ValueError("Error occurred in optimizer creation") - - self.ablation_size = (ablation_size,) - - if verbose: - logger.info(self.model) - - self.ablator = ColumnAblator( - ablation_size=ablation_size, - channels_first=True, - to_reshape=self.to_reshape, - original_shape=input_shape, - output_shape=model.default_cfg["input_size"], - device_type=device_type, - ) - - # set the method back to avoid unexpected side effects later on should timm need to be reused. - timm.models.vision_transformer._create_vision_transformer = tmp_func - - @classmethod - def get_models(cls, generate_from_null: bool = False) -> List[str]: - """ - Return the supported model names to the user. - - :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. - :return: A list of compatible models - """ - import timm - - supported_models = [ - "vit_base_patch8_224", - "vit_base_patch16_18x2_224", - "vit_base_patch16_224", - "vit_base_patch16_224_miil", - "vit_base_patch16_384", - "vit_base_patch16_clip_224", - "vit_base_patch16_clip_384", - "vit_base_patch16_gap_224", - "vit_base_patch16_plus_240", - "vit_base_patch16_rpn_224", - "vit_base_patch16_xp_224", - "vit_base_patch32_224", - "vit_base_patch32_384", - "vit_base_patch32_clip_224", - "vit_base_patch32_clip_384", - "vit_base_patch32_clip_448", - "vit_base_patch32_plus_256", - "vit_giant_patch14_224", - "vit_giant_patch14_clip_224", - "vit_gigantic_patch14_224", - "vit_gigantic_patch14_clip_224", - "vit_huge_patch14_224", - "vit_huge_patch14_clip_224", - "vit_huge_patch14_clip_336", - "vit_huge_patch14_xp_224", - "vit_large_patch14_224", - "vit_large_patch14_clip_224", - "vit_large_patch14_clip_336", - "vit_large_patch14_xp_224", - "vit_large_patch16_224", - "vit_large_patch16_384", - "vit_large_patch32_224", - "vit_large_patch32_384", - "vit_medium_patch16_gap_240", - "vit_medium_patch16_gap_256", - "vit_medium_patch16_gap_384", - "vit_small_patch16_18x2_224", - "vit_small_patch16_36x1_224", - "vit_small_patch16_224", - "vit_small_patch16_384", - "vit_small_patch32_224", - "vit_small_patch32_384", - "vit_tiny_patch16_224", - "vit_tiny_patch16_384", - ] - - if not generate_from_null: - return supported_models - - supported = [] - unsupported = [] - - models = timm.list_models("vit_*") - for model in models: - logger.info(f"Testing {model} creation") - try: - _ = PyTorchSmoothedViT( - model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=False, - replace_last_layer=True, - verbose=False, - ) - supported.append(model) - except (TypeError, AttributeError): - unsupported.append(model) - - if supported != supported_models: - logger.warning( - "Difference between the generated and fixed model list. Although not necessarily " - "an error, this may point to the timm library being updated." 
- ) - - return supported - - @staticmethod - def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> ArtViT: - """ - Creates a vision transformer using ArtViT which controls the forward pass of the model - - :param variant: The name of the vision transformer to load - :param pretrained: If to load pre-trained weights - :return: A ViT with the required methods needed for ART - """ - - from timm.models._builder import build_model_with_cfg - from timm.models.vision_transformer import checkpoint_filter_fn - - return build_model_with_cfg( - ArtViT, - variant, - pretrained, - pretrained_filter_fn=checkpoint_filter_fn, - **kwargs, - ) - - def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: - """ - Method to update the batchnorm of a ViT on small datasets - - :param x: Training data. - :param batch_size: Size of batches. - :param nb_epochs: How many times to forward pass over the input data - """ - - self.model.train() - - ind = np.arange(len(x)) - num_batch = int(len(x) / float(batch_size)) - - with torch.no_grad(): - for _ in tqdm(range(nb_epochs)): - for m in tqdm(range(num_batch)): - i_batch = torch.from_numpy(np.copy(x[ind[m * batch_size : (m + 1) * batch_size]])).to(self.device) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - _ = self.model(i_batch) - - def fit( # pylint: disable=W0221 - self, - x: np.ndarray, - y: np.ndarray, - batch_size: int = 128, - nb_epochs: int = 10, - training_mode: bool = True, - drop_last: bool = False, - scheduler: Optional[Any] = None, - update_batchnorm: bool = True, - batchnorm_update_epochs: int = 1, - transform: Optional["torchvision.transforms.transforms.Compose"] = None, - verbose: bool = True, - **kwargs, - ) -> None: - """ - Fit the classifier on the training set `(x, y)`. - - :param x: Training data. - :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of - shape (nb_samples,). - :param batch_size: Size of batches. - :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. - :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by - the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then - the last batch will be smaller. (default: ``False``) - :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param update_batchnorm: if to run the training data through the model to update any batch norm statistics prior - to training. Useful on small datasets when using pre-trained ViTs. - :param batchnorm_update_epochs: how many times to forward pass over the training data - to pre-adjust the batchnorm statistics. - :param transform: Torchvision compose of relevant augmentation transformations to apply. - :param verbose: if to display training progress bars - :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch - and providing it takes no effect. 
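A minimal usage sketch of this method, assuming an already constructed estimator `art_model` and NumPy training arrays `x_train`/`y_train` (as in the accompanying notebook in this series):

    import torch
    from torchvision import transforms

    scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1)
    art_model.fit(x_train, y_train,
                  nb_epochs=30,
                  update_batchnorm=True,  # forward-pass the ablated data first to refresh batch-norm statistics
                  scheduler=scheduler,
                  transform=transforms.Compose([transforms.RandomHorizontalFlip()]))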
- """ - - # Set model mode - self._model.train(mode=training_mode) - - if self._optimizer is None: # pragma: no cover - raise ValueError("An optimizer is needed to train the model, but none for provided.") - - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - - # Apply preprocessing - x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) - - if update_batchnorm: - self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) - - # Check label shape - y_preprocessed = self.reduce_labels(y_preprocessed) - - num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) - ind = np.arange(len(x_preprocessed)) - - # Start training - for _ in tqdm(range(nb_epochs)): - # Shuffle the examples - random.shuffle(ind) - - epoch_acc = [] - epoch_loss = [] - epoch_batch_sizes = [] - - pbar = tqdm(range(num_batch), disable=not verbose) - - # Train for one epoch - for m in pbar: - i_batch = torch.from_numpy(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])).to( - self._device - ) - if transform is not None: - i_batch = transform(i_batch) - i_batch = self.ablator.forward(i_batch, column_pos=random.randint(0, x.shape[3])) - - o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) - - # Zero the parameter gradients - self._optimizer.zero_grad() - - # Perform prediction - try: - model_outputs = self.model(i_batch) - except ValueError as err: - if "Expected more than 1 value per channel when training" in str(err): - logger.exception( - "Try dropping the last incomplete batch by setting drop_last=True in " - "method PyTorchClassifier.fit." - ) - raise err - - loss = self.loss(model_outputs, o_batch) - acc = self.get_accuracy(preds=model_outputs, labels=o_batch) - - # Do training - if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 - - with amp.scale_loss(loss, self._optimizer) as scaled_loss: - scaled_loss.backward() - - else: - loss.backward() - - self.optimizer.step() - - epoch_acc.append(acc) - epoch_loss.append(loss.cpu().detach().numpy()) - epoch_batch_sizes.append(len(i_batch)) - - if verbose: - pbar.set_description( - f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " - f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " - ) - - if scheduler is not None: - scheduler.step() - - def eval_and_certify( - self, - x: np.ndarray, - y: np.ndarray, - size_to_certify: int, - batch_size: int = 128, - verbose: bool = True, - ) -> Tuple["torch.Tensor", "torch.Tensor"]: - """ - Evaluates the ViT's normal and certified performance over the supplied data. - - :param x: Evaluation data. - :param y: Evaluation labels. - :param size_to_certify: The size of the patch to certify against. - If not provided will default to the ablation size. - :param batch_size: batch size when evaluating. 
- :param verbose: If to display the progress bar - :return: The accuracy and certified accuracy over the dataset - """ - - self.model.eval() - y = check_and_transform_label_format(y, nb_classes=self.nb_classes) - - # Apply preprocessing - x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) - - # Check label shape - y_preprocessed = self.reduce_labels(y_preprocessed) - - num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - pbar = tqdm(range(num_batch), disable=not verbose) - accuracy = torch.tensor(0.0).to(self._device) - cert_sum = torch.tensor(0.0).to(self._device) - n_samples = 0 - - with torch.no_grad(): - for m in pbar: - if m == (num_batch - 1): - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size :])).to(self._device) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size :]).to(self._device) - else: - i_batch = torch.from_numpy(np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size])).to( - self._device - ) - o_batch = torch.from_numpy(y_preprocessed[m * batch_size : (m + 1) * batch_size]).to(self._device) - - predictions = [] - pred_counts = torch.zeros((len(i_batch), self.nb_classes)).to(self._device) - for pos in range(i_batch.shape[-1]): - ablated_batch = self.ablator.forward(i_batch, column_pos=pos) - - # Perform prediction - model_outputs = self.model(ablated_batch) - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1)] += 1 - predictions.append(model_outputs) - - _, cert_and_correct, top_predicted_class = self.ablator.certify( - pred_counts, size_to_certify=size_to_certify, label=o_batch - ) - cert_sum += torch.sum(cert_and_correct) - accuracy += torch.sum(top_predicted_class == o_batch) - n_samples += len(cert_and_correct) - - pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") - - return (accuracy / n_samples), (cert_sum / n_samples) - - @staticmethod - def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: - """ - Helper function to get the accuracy during training. - - :param preds: model predictions. - :param labels: ground truth labels (not one hot). - :return: prediction accuracy. - """ - if isinstance(preds, torch.Tensor): - preds = preds.detach().cpu().numpy() - - if isinstance(labels, torch.Tensor): - labels = labels.detach().cpu().numpy() - - return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) diff --git a/art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py b/art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py deleted file mode 100644 index 2a0f5bc564..0000000000 --- a/art/estimators/certification/smoothed_vision_transformers_old/smooth_vit.py +++ /dev/null @@ -1,150 +0,0 @@ -# MIT License -# -# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -# Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -""" -This module implements Certified Patch Robustness via Smoothed Vision Transformers - -| Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - -| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf -""" - -from typing import Optional, Tuple - -import torch - - -class UpSampler(torch.nn.Module): - """ - Resizes datasets to the specified size. - Usually for upscaling datasets like CIFAR to Imagenet format - """ - - def __init__(self, input_size: int, final_size: int) -> None: - """ - Creates an upsampler to make the supplied data match the pre-trained ViT format - - :param input_size: Size of the current input data - :param final_size: Desired final size - """ - super().__init__() - self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Forward pass though the upsampler. - - :param x: Input data - :return: The upsampled input data - """ - return self.upsample(x) - - -class ColumnAblator(torch.nn.Module): - """ - Pure Pytorch implementation of stripe/column ablation. - """ - - def __init__( - self, - ablation_size: int, - channels_first: bool, - to_reshape: bool = False, - original_shape: Optional[Tuple] = None, - output_shape: Optional[Tuple] = None, - device_type: str = "gpu", - ): - """ - Creates a column ablator - - :param ablation_size: The size of the column we will retain. - :param channels_first: If the input is in channels first format. Currently required to be True. - :param to_reshape: If the input requires reshaping. - :param original_shape: Original shape of the input. - :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. - """ - super().__init__() - self.ablation_size = ablation_size - self.channels_first = channels_first - self.to_reshape = to_reshape - - if device_type == "cpu" or not torch.cuda.is_available(): - self.device = torch.device("cpu") - else: # pragma: no cover - cuda_idx = torch.cuda.current_device() - self.device = torch.device(f"cuda:{cuda_idx}") - - if original_shape is not None and output_shape is not None: - self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) - - def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: - """ - Ablates the input colum wise - - :param x: Input data - :param column_pos: The start position of the albation - :return: The ablated input with 0s where the ablation occurred - """ - k = self.ablation_size - if column_pos + k > x.shape[-1]: - x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 - else: - x[:, :, :, :column_pos] = 0.0 - x[:, :, :, column_pos + k :] = 0.0 - return x - - def forward(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: - """ - Forward pass though the ablator. We insert a new channel to keep track of the ablation location. 
- - :param x: Input data - :param column_pos: The start position of the albation - :return: The albated input with an extra channel indicating the location of the ablation - """ - assert x.shape[1] == 3 - ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) - x = torch.cat([x, ones], dim=1) - x = self.ablate(x, column_pos=column_pos) - if self.to_reshape: - x = self.upsample(x) - return x - - def certify( - self, pred_counts: torch.Tensor, size_to_certify: int, label: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Performs certification of the predictions - - :param pred_counts: The model predictions over the ablated data. - :param size_to_certify: The patch size we wish to check certification against - :param label: The ground truth labels - :return: A tuple consisting of: the certified predictions, - the predictions which were certified and also correct, - and the most predicted class across the different ablations on the input. - """ - - num_of_classes = pred_counts.shape[-1] - - top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) - second_class_counts, _ = pred_counts.kthvalue(num_of_classes - 1, dim=1) - - cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - - cert_and_correct = cert & (label == top_predicted_class) - - return cert, cert_and_correct, top_predicted_class From fc0f1811f393f7d93dff99a505b087dd6fd4a462 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Thu, 3 Aug 2023 10:12:29 +0100 Subject: [PATCH 33/55] adding row mode and test Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 56 +++--- .../derandomized_smoothing/pytorch.py | 34 ++-- .../vision_transformers/vit.py | 5 +- .../test_derandomized_smoothing.py | 16 +- .../certification/test_smooth_vit.py | 179 +++++++++++++----- 5 files changed, 184 insertions(+), 106 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index 68500a2514..eecc13693b 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -68,6 +68,7 @@ def __init__( channels_first: bool, mode, to_reshape: bool, + ablation_mode: str = "column", original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, algorithm: str = "salman2021", @@ -91,6 +92,7 @@ def __init__( self.additional_channels = False self.algorithm = algorithm self.original_shape = original_shape + self.ablation_mode = ablation_mode if self.algorithm == "levine2020": self.additional_channels = True @@ -122,7 +124,9 @@ def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: x[:, :, :, column_pos + k :] = 0.0 return x - def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos=None) -> torch.Tensor: + def forward( + self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos=None + ) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. @@ -131,17 +135,12 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] :param row_pos: Unused. :return: The albated input with an extra channel indicating the location of the ablation """ + if row_pos is not None: + raise ValueError("Use column_pos for a ColumnAblator. 
The row_pos argument is unused") - if ( - self.original_shape is not None - and x.shape[1] != self.original_shape[0] - and self.algorithm == "salman2021" - ): + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.algorithm == "salman2021": raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") - if column_pos is None: - column_pos = random.randint(0, x.shape[3]) - if isinstance(x, np.ndarray): x = torch.from_numpy(x).to(self.device) @@ -152,15 +151,20 @@ def forward(self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) - if ( - self.original_shape is not None - and x.shape[1] != self.original_shape[0] - and self.additional_channels - ): + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.additional_channels: raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + if self.ablation_mode == "row": + x = torch.transpose(x, 3, 2) + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + ablated_x = self.ablate(x, column_pos=column_pos) + if self.ablation_mode == "row": + ablated_x = torch.transpose(ablated_x, 3, 2) + if self.to_reshape: ablated_x = self.upsample(ablated_x) return ablated_x @@ -192,7 +196,8 @@ def certify( # NB! argmax and kthvalue handle ties between predicted counts differently. # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L98 - # uses argmax for the model predictions (later called https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # uses argmax for the model predictions + # (later called y_smoothed https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) # and kthvalue for the certified predictions. # to be consistent with the original implementation we also follow this here. top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) @@ -214,7 +219,7 @@ def certify( class BlockAblator(torch.nn.Module): """ - Pure Pytorch implementation of stripe/column ablation. + Pure Pytorch implementation of block ablation. """ def __init__( @@ -231,7 +236,7 @@ def __init__( """ Creates a column ablator - :param ablation_size: The size of the column we will retain. + :param ablation_size: The size of the block we will retain. :param channels_first: If the input is in channels first format. Currently required to be True. :param to_reshape: If the input requires reshaping. :param original_shape: Original shape of the input. @@ -294,13 +299,9 @@ def forward( :param x: Input data :param column_pos: The start position of the albation - :return: The albated input with an extra channel indicating the location of the ablation + :return: The albated input with an extra channel indicating the location of the ablation if running in """ - if ( - self.original_shape is not None - and x.shape[1] != self.original_shape[0] - and self.algorithm == "salman2021" - ): + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.algorithm == "salman2021": raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. 
Recived shape of {x.shape[1]}") if column_pos is None: @@ -319,11 +320,7 @@ def forward( if self.additional_channels: x = torch.cat([x, 1.0 - x], dim=1) - if ( - self.original_shape is not None - and x.shape[1] != self.original_shape[0] - and self.additional_channels - ): + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.additional_channels: raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") ablated_x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) @@ -357,7 +354,8 @@ def certify( # NB! argmax and kthvalue handle ties between predicted counts differently. # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L145 - # uses argmax for the model predictions (later called https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # uses argmax for the model predictions + # (later called y_smoothed https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) # and kthvalue for the certified predictions. # to be consistent with the original implementation we also follow this here. top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 66a4c5c3bc..e9b6a21c36 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -41,7 +41,6 @@ import numpy as np from tqdm import tqdm -from art.config import ART_NUMPY_DTYPE from art.estimators.classification.pytorch import PyTorchClassifier from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchSmoothedViT from art.utils import check_and_transform_label_format @@ -98,8 +97,8 @@ def __init__( Create a smoothed classifier. :param model: Either a CNN or a VIT. For a ViT supply a string specifying which ViT architecture to load from - the ViT library, or a vision transformer already created with the Pytorch Image Models (timm) library. - To run Levine et al. (2020) provide a regular pytorch model. + the ViT library, or a vision transformer already created with the + Pytorch Image Models (timm) library. To run Levine et al. (2020) provide a regular pytorch model. :param loss: The loss function for which to compute gradients for training. The target label must be raw categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. @@ -134,6 +133,9 @@ def __init__( import torch + if not channels_first: + raise ValueError("Channels must be set to first") + print(algorithm) self.mode = None @@ -209,7 +211,8 @@ def __init__( if model.default_cfg["input_size"] != input_shape: if verbose: logger.warning( - " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape. The input will be rescaled to (%i, %i, %i)", + " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape." 
+ " The input will be rescaled to (%i, %i, %i)", *model.default_cfg["input_size"], *input_shape, *model.default_cfg["input_size"], @@ -271,10 +274,11 @@ def __init__( if TYPE_CHECKING: self.ablator: Union[ColumnAblator, BlockAblator] - if ablation_type == "column": + if ablation_type in {"column", "row"}: self.ablator = ColumnAblator( ablation_size=ablation_size, channels_first=True, + ablation_mode=ablation_type, to_reshape=self.to_reshape, original_shape=input_shape, output_shape=output_shape, @@ -513,35 +517,33 @@ def eval_and_certify( o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] pred_counts = np.zeros((len(i_batch), self.nb_classes)) - if self.ablation_type == "column": + if self.ablation_type in {"column", "row"}: for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction model_outputs = self.model(ablated_batch) - if self.algorithm == "levine2020": + if self.algorithm == "salman2021": + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: if self.logits: model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) model_outputs = model_outputs >= self.threshold pred_counts += model_outputs.cpu().numpy() - else: - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: for column_pos in range(i_batch.shape[-1]): for row_pos in range(i_batch.shape[-2]): ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) model_outputs = self.model(ablated_batch) - if self.algorithm == "levine2020": + + if self.algorithm == "salman2021": + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: if self.logits: model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) model_outputs = model_outputs >= self.threshold pred_counts += model_outputs.cpu().numpy() - else: - # model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) - # model_outputs = model_outputs >= 0.3 - # pred_counts += model_outputs.cpu().numpy() - - pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index 49168e38fa..ae8549a8e7 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -74,8 +74,8 @@ def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> Non Creates a convolution that mimics the embedding layer to be used for the ablation mask to track where the image was ablated. - :param patch_size: The patch size used by the ViT - :param embed_dim: The embedding dimension used by the ViT + :param patch_size: The patch size used by the ViT. + :param embed_dim: The embedding dimension used by the ViT. :param device: Which device to set the emdedding layer to. :param kwargs: Handles the remaining kwargs from the ViT configuration. """ @@ -123,7 +123,6 @@ def __init__(self, **kwargs): """ Create a ArtViT instance :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class - Must contain ... 
""" self.to_drop_tokens = kwargs["drop_tokens"] diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index 3cb2037d4a..41d71ddcb4 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -126,7 +126,7 @@ def forward(self, x): criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(ptc.parameters(), lr=0.01, momentum=0.9) try: - for ablation_type in ["column", "block"]: + for ablation_type in ["column", "row", "block"]: classifier = PyTorchDeRandomizedSmoothing( model=ptc, clip_values=(0, 1), @@ -137,7 +137,7 @@ def forward(self, x): ablation_type=ablation_type, ablation_size=5, threshold=0.3, - algorithm='levine2020', + algorithm="levine2020", logits=True, ) classifier.fit(x=dataset[0], y=dataset[1], nb_epochs=1) @@ -227,7 +227,6 @@ def forward(self, x): return self.fc2(x) def load_weights(self): - fpath = os.path.join( os.path.dirname(os.path.dirname(__file__)), "../../utils/resources/models/certification/derandomized/" ) @@ -268,13 +267,14 @@ def load_weights(self): ablation_type=ablation_type, ablation_size=ablation_size, threshold=0.3, - algorithm='levine2020', + algorithm="levine2020", logits=True, ) preds = classifier.predict(np.copy(fix_get_mnist_data[0])) - cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify(preds, - size_to_certify=size_to_certify) + cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify( + preds, label=fix_get_mnist_data[1], size_to_certify=size_to_certify + ) if ablation_type == "column": assert np.sum(cert.cpu().numpy()) == 52 else: @@ -360,7 +360,9 @@ def get_weights(): x = np.squeeze(x) x = np.expand_dims(x, axis=-1) preds = classifier.predict(x) - num_certified = classifier.ablator.certify(preds, size_to_certify=size_to_certify) + num_certified = classifier.ablator.certify( + preds, label=fix_get_mnist_data[1], size_to_certify=size_to_certify + ) if ablation_type == "column": assert np.sum(num_certified) == 52 diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index f4c6ac1be3..ee3d345139 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -58,7 +58,7 @@ def fix_get_cifar10_data(): return x_test.astype(np.float32), y_test -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +@pytest.mark.only_with_platform("pytorch") def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ Check that the ablation is being performed correctly @@ -75,7 +75,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): ablation_size=4, channels_first=True, to_reshape=False, # do not upsample initially - mode='ViT', + mode="ViT", original_shape=(3, 32, 32), output_shape=(3, 224, 224), ) @@ -102,7 +102,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): ablation_size=4, channels_first=True, to_reshape=True, - mode='ViT', + mode="ViT", original_shape=(3, 32, 32), output_shape=(3, 224, 224), ) @@ -126,7 +126,77 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): art_warning(e) -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +@pytest.mark.only_with_platform("pytorch") +def test_ablation(art_warning, fix_get_mnist_data, 
fix_get_cifar10_data): + """ + Check that the ablation is being performed correctly + """ + import torch + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + + try: + cifar_data = fix_get_cifar10_data[0] + + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=False, # do not upsample initially + mode="ViT", + ablation_mode="row", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + cifar_data = torch.from_numpy(cifar_data).to(device) + # check that the ablation functioned when in the middle of the image + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 0:10, :]) == 0 + assert torch.sum(ablated[:, :, 10:14, :]) > 0 + assert torch.sum(ablated[:, :, 14:, :]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 30:, :]) > 0 + assert torch.sum(ablated[:, :, 2:30, :]) == 0 + assert torch.sum(ablated[:, :, :2, :]) > 0 + + # check that upsampling works as expected + col_ablator = ColumnAblator( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode="ViT", + ablation_mode="row", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, : 10 * 7, :]) == 0 + assert torch.sum(ablated[:, :, 10 * 7 : 14 * 7, :]) > 0 + assert torch.sum(ablated[:, :, 14 * 7 :, :]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 30 * 7 :, :]) > 0 + assert torch.sum(ablated[:, :, 2 * 7 : 30 * 7, :]) == 0 + assert torch.sum(ablated[:, :, : 2 * 7, :]) > 0 + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ Check that the training loop for pytorch does not result in errors @@ -158,10 +228,10 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) art_warning(e) -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +@pytest.mark.only_with_platform("pytorch") def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ - Check that ... + Check that based on a given set of synthetic class predictions the certification gives the expected results. 
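    As an illustrative worked example of the margin criterion used by the ablator's certify method,
    (top_class_counts - second_class_counts) > 2 * (size_to_certify + ablation_size - 1): with
    ablation_size=4 and size_to_certify=4 the top class must beat the runner-up by more than
    2 * (4 + 4 - 1) = 14 ablation wins, so counts of 30 vs. 10 certify while 20 vs. 10 do not.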
""" from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator import torch @@ -170,7 +240,7 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 col_ablator = ColumnAblator( ablation_size=4, channels_first=True, - mode='ViT', + mode="ViT", to_reshape=True, # do not upsample initially original_shape=(3, 32, 32), output_shape=(3, 224, 224), @@ -186,7 +256,8 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 except ARTTestException as e: art_warning(e) -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") + +@pytest.mark.only_with_platform("pytorch") @pytest.mark.parametrize("ablation", ["block", "column"]) def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): """ @@ -205,7 +276,7 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 # shutil.rmtree('smoothed-vit') os.system("git clone https://github.com/MadryLab/smoothed-vit") - sys.path.append('smoothed-vit/src/utils/') + sys.path.append("smoothed-vit/src/utils/") # Original MaskProcessor used ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) # which is not compatible with non-cuda torch as is found when running tests on github. @@ -235,7 +306,8 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + class MaskProcessor(torch.nn.Module): def __init__(self, patch_size=16): super().__init__() @@ -251,9 +323,13 @@ def forward(self, ones_mask): return ones_mask from custom_models import preprocess + preprocess.MaskProcessor = MaskProcessor - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator, BlockAblator + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ( + ColumnAblator, + BlockAblator, + ) from custom_models.vision_transformer import vit_small_patch16_224, vit_base_patch16_224 cifar_data = fix_get_cifar10_data[0][:50] @@ -318,24 +394,24 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: madry_vit.load_state_dict(art_sd) madry_vit = madry_vit.to(device) - if ablation == 'column': + if ablation == "column": ablator = ColumnAblator( ablation_size=4, channels_first=True, to_reshape=True, - mode='ViT', + mode="ViT", original_shape=(3, 32, 32), output_shape=(3, 224, 224), ) ablated = ablator.forward(cifar_data, column_pos=10) - elif ablation == 'block': + elif ablation == "block": ablator = BlockAblator( ablation_size=4, channels_first=True, to_reshape=True, original_shape=(3, 32, 32), output_shape=(3, 224, 224), - mode='ViT', + mode="ViT", ) ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) @@ -343,7 +419,8 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") + +@pytest.mark.only_with_platform("pytorch") @pytest.mark.parametrize("ablation", ["block", "column"]) def test_certification_equivalence(art_warning, fix_get_mnist_data, 
fix_get_cifar10_data, ablation): """ @@ -356,20 +433,21 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa import types from torch.utils.data import Dataset - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class ArgClass: def __init__(self): self.certify_patch_size = 4 self.certify_ablation_size = 4 self.certify_stride = 1 - self.dataset = 'cifar10' - self.certify_out_dir = './' - self.exp_name = 'tests' - if ablation == 'column': - self.certify_mode = 'col' - if ablation == 'block': - self.certify_mode = 'block' + self.dataset = "cifar10" + self.certify_out_dir = "./" + self.exp_name = "tests" + if ablation == "column": + self.certify_mode = "col" + if ablation == "block": + self.certify_mode = "block" self.batch_id = None class DataSet(Dataset): @@ -390,11 +468,11 @@ def __getitem__(self, idx): # if os.path.exists('smoothed-vit'): # shutil.rmtree('smoothed-vit') - if os.path.exists('tests'): - shutil.rmtree('tests') + if os.path.exists("tests"): + shutil.rmtree("tests") os.system("git clone https://github.com/MadryLab/smoothed-vit") - sys.path.append('smoothed-vit/src/utils/') + sys.path.append("smoothed-vit/src/utils/") from smoothing import certify art_model = PyTorchDeRandomizedSmoothing( @@ -411,13 +489,16 @@ def __getitem__(self, idx): replace_last_layer=True, verbose=False, ) - if os.path.isfile('vit_small_patch16_224_block.pt'): - art_model.model.load_state_dict(torch.load('vit_small_patch16_224_block.pt')) + + # TODO: Look into incorporating this model into the CI runs rather than just local testing. + if os.path.isfile("vit_small_patch16_224_block.pt"): + art_model.model.load_state_dict(torch.load("vit_small_patch16_224_block.pt")) class WrappedModel(torch.nn.Module): """ Original implementation requires to return a tuple. We add a dummy return to satisfy this. """ + def __init__(self, my_model): super().__init__() self.model = my_model @@ -427,7 +508,7 @@ def forward(self, x): if x.shape[-1] != 224: x = self.upsample(x) x = self.model(x) - return x, 'filler_arg' + return x, "filler_arg" def _cuda(self): return self @@ -438,6 +519,7 @@ class MyDataloader(Dataset): (such as those run for ART CI checks) the test will fail. Here we override .cuda() for the instances to just return self. 
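        A minimal sketch of the override applied in __getitem__ below, assuming a CPU-only machine;
        `batch` is just a placeholder tensor:

            import types
            import torch

            def _cuda(self):
                return self

            batch = torch.zeros(2, 3)
            batch.cuda = types.MethodType(_cuda, batch)  # instance attribute shadows Tensor.cuda
            assert batch.cuda() is batch                 # .cuda() is now a no-op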
""" + def __init__(self, x, y): self.x = x self.y = y @@ -450,8 +532,8 @@ def __getitem__(self, idx): if idx >= 2: raise IndexError else: - x = self.x[idx*self.bsize:idx*self.bsize+self.bsize] - y = self.y[idx*self.bsize:idx*self.bsize+self.bsize] + x = self.x[idx * self.bsize : idx * self.bsize + self.bsize] + y = self.y[idx * self.bsize : idx * self.bsize + self.bsize] x.cuda = types.MethodType(_cuda, x) y.cuda = types.MethodType(_cuda, y) @@ -464,8 +546,6 @@ def __getitem__(self, idx): cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) - # upsample = torch.nn.Upsample(scale_factor=224 / 32) - # cifar_data = upsample(cifar_data) if torch.cuda.is_available(): dataset = DataSet(cifar_data, cifar_labels) @@ -476,34 +556,31 @@ def __getitem__(self, idx): args = ArgClass() model = WrappedModel(my_model=art_model.model) - certify(args=args, - model=model, - validation_loader=validation_loader, - store=None) - summary = torch.load('tests/m4_s4_summary.pth') - print('the summary is ', summary) - acc, cert_acc = art_model.eval_and_certify(x=cifar_data.cpu().numpy(), - y=cifar_labels.cpu().numpy(), - batch_size=num_to_fetch, - size_to_certify=4) - - assert torch.allclose(torch.tensor(cert_acc), torch.tensor(summary['cert_acc'])) - assert torch.tensor(acc) == torch.tensor(summary['smooth_acc']) + certify(args=args, model=model, validation_loader=validation_loader, store=None) + summary = torch.load("tests/m4_s4_summary.pth") + print("the summary is ", summary) + acc, cert_acc = art_model.eval_and_certify( + x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 + ) + + assert torch.allclose(torch.tensor(cert_acc), torch.tensor(summary["cert_acc"])) + assert torch.tensor(acc) == torch.tensor(summary["smooth_acc"]) upsample = torch.nn.Upsample(scale_factor=224 / 32) cifar_data = upsample(cifar_data) acc_non_ablation = art_model.model(cifar_data) acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) - print('acc non ablation ', acc_non_ablation) - assert np.allclose(acc_non_ablation.astype(float), summary['acc']) + print("acc non ablation ", acc_non_ablation) + assert np.allclose(acc_non_ablation.astype(float), summary["acc"]) -@pytest.mark.skip_framework("mxnet", "non_dl_frameworks", "tensorflow1", "keras", "kerastf", "tensorflow2") +@pytest.mark.only_with_platform("pytorch") def test_equivalence(fix_get_cifar10_data): import torch from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class MadrylabImplementations: """ From afca1cc6041d266616513157a46a11e8e56f7a1e Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Thu, 3 Aug 2023 14:05:19 +0100 Subject: [PATCH 34/55] fixing bug in which tests folder was overwritten Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/pytorch.py | 8 +- .../vision_transformers/pytorch.py | 1 - notebooks/smoothed_vision_transformers.ipynb | 314 +++++++++++++++--- .../certification/test_smooth_vit.py | 75 +++-- 4 files changed, 319 insertions(+), 79 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 
e9b6a21c36..c38c8c8eae 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -135,9 +135,10 @@ def __init__( if not channels_first: raise ValueError("Channels must be set to first") + logging.info("Running algorithm: %s" % algorithm) - print(algorithm) - + # Default value for output shape + output_shape = input_shape self.mode = None if importlib.util.find_spec("timm") is not None and algorithm == "salman2021": from timm.models.vision_transformer import VisionTransformer @@ -229,7 +230,6 @@ def __init__( self.mode = "CNN" output_shape = input_shape self.to_reshape = False - print("We are here!") elif algorithm == "levine2020": if ablation_type is None or threshold is None or logits is None: @@ -238,6 +238,8 @@ def __init__( " the prediction threshold, and ablation type" ) self.mode = "CNN" + # input channels are internally doubled. + input_shape = (input_shape[0] * 2, input_shape[1], input_shape[2]) output_shape = input_shape self.to_reshape = False diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index b577d448cb..6e79a85465 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -30,7 +30,6 @@ if TYPE_CHECKING: - import torch from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT logging.basicConfig(level=logging.INFO) diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb index c38132fbe1..325cecf976 100644 --- a/notebooks/smoothed_vision_transformers.ipynb +++ b/notebooks/smoothed_vision_transformers.ipynb @@ -46,16 +46,7 @@ "execution_count": 1, "id": "aeb27667", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "import sys\n", "import numpy as np\n", @@ -66,7 +57,7 @@ "from matplotlib import pyplot as plt\n", "\n", "# The core tool is PyTorchSmoothedViT which can be imported as follows:\n", - "from art.estimators.certification.smoothed_vision_transformers import PyTorchSmoothedViT\n", + "from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ] @@ -188,7 +179,7 @@ "# additional models supported.\n", "\n", "# We can see all the models supported by using the .get_models() method:\n", - "PyTorchSmoothedViT.get_models()" + "PyTorchDeRandomizedSmoothing.get_models()" ] }, { @@ -198,11 +189,14 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "ViT expects input shape of (3, 224, 224), but (3, 32, 32) specified as the input shape. The input will be rescaled to (3, 224, 224)\n", - "ArtViT(\n", + "INFO:root:Running algorithm: salman2021\n", + "INFO:root:Converting Adam Optimiser\n", + "WARNING:art.estimators.certification.derandomized_smoothing.pytorch: ViT expects input shape of: (3, 224, 224) but (3, 32, 32) specified as the input shape. 
The input will be rescaled to (3, 224, 224)\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:PyTorchViT(\n", " (patch_embed): PatchEmbed(\n", " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", " (norm): Identity()\n", @@ -516,14 +510,14 @@ "vit_model = timm.create_model('vit_small_patch16_224')\n", "optimizer = torch.optim.Adam(vit_model.parameters(), lr=1e-4)\n", "\n", - "art_model = PyTorchSmoothedViT(model=vit_model, # Name of the model acitecture to load\n", - " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", - " optimizer=optimizer, # the optimizer to use: note! this is not initialised here we just supply the class!\n", - " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", - " nb_classes=10,\n", - " ablation_size=4, # Size of the retained column\n", - " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", - " load_pretrained=True) # if to load pre-trained weights for the ViT" + "art_model = PyTorchDeRandomizedSmoothing(model=vit_model, # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=optimizer, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT" ] }, { @@ -533,11 +527,15 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "ViT expects input shape of (3, 224, 224), but (3, 32, 32) specified as the input shape. The input will be rescaled to (3, 224, 224)\n", - "ArtViT(\n", + "INFO:root:Running algorithm: salman2021\n", + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). Loading weights using safetensors.\n", + "WARNING:art.estimators.certification.derandomized_smoothing.pytorch: ViT expects input shape of: (3, 224, 224) but (3, 32, 32) specified as the input shape. The input will be rescaled to (3, 224, 224)\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:PyTorchViT(\n", " (patch_embed): PatchEmbed(\n", " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", " (norm): Identity()\n", @@ -846,15 +844,15 @@ "source": [ "# Or we can just feed in the model name and ART will internally create the ViT.\n", "\n", - "art_model = PyTorchSmoothedViT(model='vit_small_patch16_224', # Name of the model acitecture to load\n", - " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", - " optimizer=torch.optim.SGD, # the optimizer to use: note! 
this is not initialised here we just supply the class!\n", - " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", - " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", - " nb_classes=10,\n", - " ablation_size=4, # Size of the retained column\n", - " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", - " load_pretrained=True) # if to load pre-trained weights for the ViT" + "art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=torch.optim.SGD, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT" ] }, { @@ -863,7 +861,7 @@ "metadata": {}, "source": [ "Creating a PyTorchSmoothedViT instance with the above code follows many of the general ART patterns with two caveats: \n", - "+ The optimizer would (normally) be supplied initialised into the estimator along with a pytorch model. However, here we have not yet created the model, we are just supplying the model architecture name. Hence, here we pass the class into PyTorchSmoothedViT with the keyword arguments in optimizer_params which you would normally use to initialise it.\n", + "+ The optimizer would (normally) be supplied initialised into the estimator along with a pytorch model. However, here we have not yet created the model, we are just supplying the model architecture name. Hence, here we pass the class into PyTorchDeRandomizedSmoothing with the keyword arguments in optimizer_params which you would normally use to initialise it.\n", "+ The input shape will primiarily determine if the input requires upsampling. The ViT model such as the one loaded is for images of 224 x 224 resolution, thus in our case of using CIFAR data, we will be upsampling it." ] }, @@ -883,7 +881,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 6, @@ -902,7 +900,7 @@ } ], "source": [ - "# We can see behind the scenes how PyTorchSmoothedViT processes input by passing in the first few CIFAR\n", + "# We can see behind the scenes how PyTorchDeRandomizedSmoothing processes input by passing in the first few CIFAR\n", "# images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", "original_image = np.moveaxis(x_train, [1], [3])\n", "\n", @@ -932,7 +930,7 @@ "metadata": {}, "outputs": [], "source": [ - "# We can now train the model\n", + "# We can now train the model. 
This can take some time depending on hardware.\n", "from torchvision import transforms\n", "\n", "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1)\n", @@ -941,8 +939,7 @@ " update_batchnorm=True, \n", " scheduler=scheduler,\n", " transform=transforms.Compose([transforms.RandomHorizontalFlip()]))\n", - "torch.save(art_model.model.state_dict(), 'trained.pt')\n", - "\n" + "torch.save(art_model.model.state_dict(), 'trained.pt')" ] }, { @@ -957,7 +954,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Normal Acc 0.891 Cert Acc 0.684: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [01:09<00:00, 1.14it/s]\n" + "Normal Acc 0.902 Cert Acc 0.703: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [02:06<00:00, 1.61s/it]\n" ] } ], @@ -966,6 +963,237 @@ "art_model.model.load_state_dict(torch.load('trained.pt'))\n", "acc, cert_acc = art_model.eval_and_certify(x_test, y_test, size_to_certify=4)" ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "a2683f52", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: salman2021\n", + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). Loading weights using safetensors.\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:root:Running algorithm: salman2021\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). 
Loading weights using safetensors.\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAACbCAYAAADvEdaMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA71klEQVR4nO29e5Bc9XXvu/aj39OPefWMRho9kYSExMNCjwEfh8S6yJjEJpbrxLdcAae4pkxG3MJKOYlSjl2hUlEdn5wyZUeGuqkEkrqhcHFywY6MyeFKtmSMACODQW+EHjN6zGhGMz09/e7e+3f+6NFev9UI2xLz6On5fqpUWr336r1/e8/u1b/+rZehlFIEAAAAAABmPeZMDwAAAAAAAEwOmNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNiBhmLXrl20ePFiCgaDtHHjRnrjjTdmekgAADAlwN6BqzFlEzs8cGC6+f73v0/bt2+nb37zm/TLX/6SbrnlFtqyZQtdunRppocGGhzYOzDdwN6BD8NQSqnJPuj3v/99uv/+++nJJ5+kjRs30uOPP07PPfccHT9+nJLJ5K99r+u6dOHCBYpGo2QYxmQPDUwiSikaHx+nrq4uMs2ZX/zduHEjrV+/nv7hH/6BiKrPUnd3Nz3yyCP0l3/5l7/2vXjuZg/19tzB3s0N6u25g72bG1zXc6emgA0bNqje3l7vteM4qqurS+3cufM3vre/v18REf7Non/9/f1T8RhdE8ViUVmWpZ5//nmx/f7771ef+cxnPqBfKBTU2NiY9+/IkSMzfh/xb/Y9d0rB3s21f/Xw3MHezb1/1/Lc2TTJlEolOnjwIO3YscPbZpombd68mQ4cOPAB/WKxSMVi0XutJhYQ123YSLZt09jYqNAPmK4nN/uVJy9oDgu9thZ+3RqPeLLf9Ak9KxDSXlieOJoaE3rlCp8rEY/ztTlleT0lvpZCgeVgKCD0HHI8OZ/Pin2xeJRfKNYrleS5LOI/n6WNvSnSJPQiYb4Xti/I4yuWhJ4ytF8DJh+7VJJ6FWV47//r7/wbRaNRmmmGh4fJcRzq6OgQ2zs6OujYsWMf0N+5cyf9zd/8zXQND0wB9fDcTZa9+2//9AwFw2G6cOJtoT989rgnOw5/JpMLVgi9BUtWenKiY4EnB0PSxJ88+ron95065MmVjLRBlnauaCLmyXZA2tl1m+7w5KU38JgKaWm3jx55x5NdV9qTcqXgyceOHvHk8bHLQk+3rZWyZqtH8kIvk+PjVRw+V1tbs9BLNPP3gqsy/J6KUKNCvvo3Kpcr9PJ/7q+L5w72bu5xLc/dpE/sJuuBs22bbNsWExYiIsvkZWPb4smW3yf1Aj6+tKCfJ3N+S07s7ID22uL35P1SzzT5XEHtPaYj1MggnniSyzuDNcdztPBG15F/Bv34pFjPJCX0LGI9/T6FAvJcoaDfk30+lmtX4D9sYmfV6F2Z2PFxZt9S/o4dO2j79u3e63Q6Td3d3TM4InCt1MNzN1n2LhgOUygcoUAwKLb7/fx51Sd2tXoh7cdbWPthVzuxC4b4h2wgwD82zdofjfq5ND07KH+ghiM8OWrSvnhsVx4vHObzuq601aUy/x0DAb7eYo3NVJptNYiPYdvyXLatXbPBNtjnk/fCrx3f0dY4ah8rpyLtbj08d9cK7N3s51qeu0mf2F0rH/bAHTt2lAzTpNTwsNBv0eyZ0cov2hw5mzVCHNuSdUc8OePID6ky2JDkCvzrLpcvCr2yw0ZlWJvpBG15vEqF9Szz6saxei7+hVyp+QVrFFo92dRsYLkoxxSy+foz2urbiCN/cobDbHwNbcXSqJnkkua/zxXYWFbKNUbfrl5LsVzz03YGaWtrI8uyaHBwUGwfHBykzs7OD+gHAoEP/E0AmGo+zN6Np0apXCxSa6JF6Kt2njAqm1fO5i1cKvQcbSJlujlPdnPyM1oY5VUwleeVrfltMhZwYfcNntx9wyJP7pq/QOglkzw+n48/T5WEXNnrXsCfwUpF2rtCgVfcUqO8cjY8PCL0bL9u/NkwNrfKz3Ewwscb01YOA0H5decqvjc+m4+RHksJvVKxauMrsHdgljDpEaDX88DFYjHxD4Brxe/307p162jPnj3eNtd1ac+ePdTT0zODIwONDOwdmAlg78CvY9IndnjgwEyxfft2+sd//Ef6l3/5Fzp69Cg9/PDDlM1m6U/+5E9memigQYG9AzMF7B34MKbEFbt9+3Z64IEH6Pbbb6cNGzbQ448/jgcOTDl/9Ed/RENDQ/SNb3yDBgYG6NZbb6WXXnrpA/FPAEwmsHdgJoC9Ax/GlEzsJuOBC9oGmaZBVBMWsEiLq1vcwdmpyXYZmxLS48q0oMN8sSD0CmUtQ03T82tBxkREpAXQKpffE2+RsSSVsp7QwcdwapIsLD9fWLEkx1Su8DjCmp4dkWMKavsqBsfsmcoVehXi4+mJEE0ROfZMlmNzyhUtZqcmZnM8Xc0YLpVrLqoO2LZtG23btm2mhwHmEJPyBVsuE9llKhVlPGsux/Foi1fM9+RMVmaxlspsQ1ra2C7aPumUWb6cM1fv2HS7J8/vkLFz8Xg7D83mz3m4JnlCDzE2tHTSfDYj9IpanG44JO1Oc4Lj+5YtXe3JR48eF3pk8DGKRbZV8ZjMdtXyw2gszS5yRTK2z3V58KOjfD/zORnLfKXSa8Wpnxi7K8DegasxZckTeOAAAHMF2DsAQL0w8+WzAQAAAADApDDj5U4+jKDhkGm4FI3KIa6Yz8vurSFOefe50p2ZGeFld8fl+Wu+Jv3f1JbtYwmu/2T7pcshNTbO+7QhtUSlW2E8zUv6Ja2kSb4gXSxKc482abWgiIjKJU7XN7V6Ur6adHVHK45saz7WYo07x6/5JkyXr7+YkUVESSsFE9DKrFRc6dody1ZdFaWK3A4AuD4qhQJVDIOMigxvCPg5/GJMK/3U2ildpwtv4vIkye4uT9brVlZPxLZBFAa+KIsB504NsZ7JtvT4u78SeutXsev0ExvWe7Kq6VSZTnPB976zF8Q+v1Y03e/nLOG29vlCr6//PdYLst3N1BR4T6f5Ptk+touxmLTV+Ty7c3Uva6XGrnm19eQlAVC3YMUOAAAAAKBBwMQOAAAAAKBBqFtXbCJgkWWaFKpxP8a1zND2mNYSxpUu
DP2VZWt+RVPOZYtaxXa9FY1dk1nqFNk9qiw+xqVLKamnZYqO53ipP+fIjKymkFaYtCjHbmmtc0yD1/+tgGwjlM+yKyXs0/o51rhBClpHjbxWPd2t8S2kMny8VI7vS6a2en25ev0VB65YACaDYj5HhnKpKSQ/47EWzk792C23enL30uVCb1zLSD1+qt+T05oNIiLKpFKefDnF7teLAzIsI6ZlxZLJWaK7v//vQs/3X9kW/k7Px3m7T4aDdHaye5iU7CaUGuUwl1++xT1lbZ+0/ZEo27iKFjZSyqSEnmaeqV2rluDU2ODLIzwOk7R+2rb8WkwkqlnG5ZoOPADUK1ixAwAAAABoEDCxAwAAAABoEDCxAwAAAABoEOo2xq4tHiTbMinqs8T2YJBfmxbHWYRqOkWUtbIBrlZaRCkZZ1HSOko4JY6hcFVNeRItPkPZXEJgvCRT7R2Hx5fTYtBq49HGs3z88yPyGD6TdWMZHnt5QMam5Mc4fmZhm1buIClLIRhRLjVQHOW4mkxGnndsnGPshsc4pvBM/5jQc6zqY+Mq5P8DMBkEAjYFAj4qW1GxPR/iEkyn0/yZfPuVN4TeyGXu9HD+Andb8FmybYxuW4oVtml6HC4R0bx2/mq4NHDWk2MBWT5lPJX25BOnT/P757XJ8/r4ePO6O8W+Lu113wDHBx5/t1/oJedx3N+ZPs0WlqVtdUv82tG6ZgRrSlgFbI7RzhdYLxaLCT3brr5PuVgHAbMDPKkAAAAAAA0CJnYAAAAAAA1C3bpiO9vC5LctivllqY2mMLsCDOEulW5BQytXUtQqjJskXROtUW6YHYlwqYH0mHR7xrXl+XGti8TZ81IvU2RXrF/zEMwPy1tt+zRX5+WU2FdUWkcNrdxJPCbdNHes5ibe6YvsSlA5eS/ibexyKOZ4HJmMnNcHfKzX3cnnSiZlM/PBdNVlW3Fc6jt0jgAAH41QKEmhUJgupaS9O9nP7sgjhw95sumT9sTRus3kxznEwjKlmzJfZNdpapzl8WxG6J05d9STIyG2BSuXrZQD19y5P//ZTz150ZIlQm3FyhWe3NoaF/sCQb6WeIzdpWZFhoBki3oHIS7Bkk+NCz3H4ZCSYIhtWiYt9WJa+ZSAFuJTKskwnNxEyZhyWf5tAKhXsGIHAAAAANAgYGIHAAAAANAg1K0rtrkpRAGfRXYpJbYHNBdEOMDVwot5uXxe1prdJxLNnlzbnLrk8Ny2XNY6OTQ1Cb0LQ7z0//5ZdhEMjcvleb1Jw6IQL+/f919uFXoL5vHx/+fBU2LfgZMDnlxx2dVhm3Ls4ylu1J3L8PiiUZ/QI4fdz8Eg7/MHZcZx2OB9Fa0r9kKtqTgRUXSk6tIolR3aD1csAB+ZRHMrhcIROtl/Qmy/eIYzTcM+/oyPZWWniEz6kicbLrtfU+PSxZrKs42zA/x5b+tICr2QFqIyf/EtntxdYzNO/+qAJ1sG26qyI7vpDA1zNv7atavEvhuWL+Xja5mvTZtuE3rvHOvz5GKBw2aKvpqsWGIXq6vYjg0MXBB6fq2rUbxZv35ZLSCfr4bNwBULZgtYsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBDqNsauvbmFgn6b8iMFsd00tHIdOS3FvyTjH2xD6wBR5niP2plsvsxxIYlmjs0oOTKe7dQ5js8YSWulRWxZid2y+AyxIOslbZlqHxzheJnlMVmJ/WILH2MwxbEzxZysDv/WCY7HMSscZ1KOyMrpFNfKlZhaaYF4WKhFXb7mgpbyr0ppobe4PTKhg5gTACaD06cPUiAYpGPvnxTbL1x835MdrYxJNB4ReiuXL/bkNavWePLFobzQOzvEx2jvZLuwaJksTxJt5ZizwVF+jxo+LfT6znLc21CK4+hWrRZq9H+s4Li6bEaOydXC8VSJbdzh1w4IveUrb/XkjvkJT37tjf1Cb2CQ7ZUeF1fIS/s5Oso2OdTEx3OVjNnL5qrXX6nIuEEA6hWs2AEAAAAANAiY2AEAAAAANAh164pNtLZRKOCj5qaQ2G6anKKfSnPKf7mmcrqppdu7xEvrqqZie1MTp82XieWjp2TZgWyR3RHBIKfJB/3yeKEIuzebLXYDHDw5KPQqJX5fMS5dse3NPA5DS90vV6RbOldil0ZW6zZRqkgXqaG5m/XGGz5TduFQptbxwubxVYpFqTfhplY17moAwPXxi5//hGyfTXaH7OywbNVaTw5pze1XrV4u9FauWODJToE/x8qUbs8scacc28d2xrISQq9cYRuXHR/x5HhN+EVFswF9l9geB5vOC714jEtOLV22WOxT2vpCPsVdgo69/rbUy/P1r9nyKU9ee/NSoZd/k12x758848nhsCxhFU+0aq/4+yKdlqVkisXqmOCKBbMFrNgBAAAAADQImNgBAAAAADQIdeuKJdMmMn1k+HwfqhLQuiiESWaJ2dqc1TS17hIkM54CIa6wPjzAWVK5Ybkcv7RFq3SueUSDEZlZunLZfD6vplix5HXoy/22JZtdR/18La3Nyzx52fKFQu903y88+dgJdn347RrXqWI3daXCf3KzJqPX5+cxulr1epeky9YwTPE/AOCjMXT+MlmWRbfdcq/YHghwJ4YWrenDvC6Z+T6SYtvVf5JdpyU3IPRMg92Jls2fcUdJm0GanXCK7M5VjrSfTfE2T76c4XAV0y/tsSs6/tSEcGiHbArydS3u6hZqQYvfZxLbtLVrZEZvIpHw5B/m/5cnD1yUNn1+kjvqOAbbal9NuE46XXXtVjNsZYgOAPUIvpkBAAAAABoETOwAAAAAABoETOwAAAAAABqEuo2xKxQqRMogo5yv2cPp9tksp7WXynKOWjE5Ji6T4/iTdE52gJjfzbdAVXjfojYZV7asi+PPcgXeN3/FLULPrzhWY3SMuzeERGo9EV3mgJnuznliVyrLsSpLb+SyBrFmGc8Xa+Zq7qNDPPbRMRmz59PiXUzFMTdlV6bva2F15GgV22uqopCaiJdRCuVOAJgMQpFmsm2bfDUfqZTWeSbQkvDkXEXGuhW0uN9Qc5Tf49Z8eAt61xxtczkn1IIhLRbX4HJJrllTLqqV49T8imP7rFCz0FN+tneuIc9lOJp9svj4voiMAQ418etKke3d5fOylFRrhOMSP/vpLZ785q/OCL2M1omiUBzy5GJefuckogkiIipp3XgAqGewYgcAAAAA0CBgYgcAAAAA0CDUrSvWMRxyDJOUIyud6+6/UJC7UjRFpZvygtb8+vQ5Xma3a3wd/sELnlwYZL3lSVme5JN3sUv0/fPscojObxd6ba3cReLSELsIEgmZ/m+6fHy/1vGh+j4uXWIHU548lLoo9M5f5JR/n4+vPxGTbpp8nq9Z2TyXN2p8rK7mmjUNQ9OT8380nABgcunsXkQ+n/8Dn7VCgcNNBtNsrv2JNqFXrrCbUi8Rlc/Ijjxlxce3bQ7LqFiyLEo4xmVHkq0pT1Yj0k1Z0kI2DJePHQrVdAzSTJyrpE13tC5Bpk/rmmHJe5HJsvvV0OJGAjX3LK3Z3VC4xZM/0XOz0Dv+/llPPnR
kgM+Tzgo9/0SHjnJZjhuAegUrdgAAAAAADQImdgAAAAAADULdumLj8QiFgn6q2HL5O5Ph9C9V5iX8sXGZCXq2b1B7D7sjQkE5l714ml0dHUF2Z8yfv0joJbq4urlvXHN1BqXLdsEtG3jXALtUQ5UhoecQX0c2WxD75oXZvVvSKr0bEdnEekGEM9KiCXYBj18eEHqXBi97ctng8RZKNdXmTfaxRgKcVVzKS3fOlQ4VTk1HCgDA9aEMi5RhfcDdlxtn92NAc2+Op0eEXqnAn+Vcmt/jq/mIRiPscm1vZjdlrEWGirQn+FyOzd158gE5vpFFbIOKjhYqUpNl61S0zNqaTF3H1Gyc5opNtMjMWtfhY+pZ+/G4dPv6DbZjqfGUJ6uytGO3rmKbmYjyfdm9+38JvaHBYSIiqlRkFQEA6hWs2AEAAAAANAiY2IEZZ//+/fQHf/AH1NXVRYZh0AsvvCD2K6XoG9/4Bs2bN49CoRBt3ryZ3nvvPaEzMjJCX/ziFykWi1EikaAHH3xQrNQCAEA9AHsHphpM7MCMk81m6ZZbbqFdu3Zddf+3vvUt+s53vkNPPvkkvf766xSJRGjLli1U0KqyfvGLX6TDhw/Tyy+/TLt376b9+/fTQw89NF2XAAAAvxWwd2CqqdsYu8zYCFUKPrJLslOEz9DmoloKvW3JkiG5DMfcNUc5fiQRCQq9/CjH2CW7uDvE/Jt/R+gdOscxIidOsnzHvBahl0rxvo5l3JXCJBlzUtIqnSeULE+SvsQxcSGt2vm8lppzORwX4ruZ41HyNWVRfv7iDz35XD+f1/LL+EDSYua0CilUrpn/m+XqmAqTlP5/zz330D333HPVfUopevzxx+nrX/86ffaznyUion/913+ljo4OeuGFF+gLX/gCHT16lF566SX6xS9+QbfffjsREX33u9+lT3/60/T3f//31NXV9YHjFotFKhY5LimdTn9AB4Bpo1IiMohstyQ2xzVz1R3nz+eNSxNCr0kr/WRpNjKbTgm9Qo7tYijCtmXlcmlbuhct8GTTx/HGmZQ8Xvc87pqz8jR3yYi1SDvb0szlU2xbdpRwNVujNDMejMgSVpWC1g1He4+vtkQM8ee6tY3jkjM5aYOzKY5Fnt/Occ33/cHdQu+FH/3/RDR55U5g78BUgxU7UNecPn2aBgYGaPPmzd62eDxOGzdupAMHDhAR0YEDByiRSHhGjoho8+bNZJomvf7661c97s6dOykej3v/uru7p/ZCAADgNwB7ByYDTOxAXTMwUP1V3dHRIbZ3dHR4+wYGBiiZTIr9tm1TS0uLp1PLjh07aGxszPvX398/BaMHAIDfHtg7MBnUrSvWNIgsg8ipKbWhNHehSbw07hjSFTuq9WtOp7XOC0Xp6pgXZzft+t/9XU9esHKT0Pv/nvpnT+7Uyo5YJVmJ/fyp91lv6WpPDrbeIPQiil3MuZFLYl/IZbdqKc/ug+Fx6UpItHMJltbOxZ6cz8SEnqm9dPwcp1HbeaJc5ntjaKn9hpJp/pVK9bEpz+IWFIFAgAKBwG9WBGAauHPDrRQKhmjp6lvE9gvnuWTS/C52l65YvkzodbbzF72l+HM9rpX7ICIqamVI9M9/U0SWO2lqYleq5Wc3r6/GVZzPcmjHx9awy3bxisVCr+yyQVY16wkVl+24snhMlk9+PZULbG9czS1q2vJ4RlCza9q+Yrks9GyLQ1GcUsqT29tkWamP/5f1RESULxTp+R/+hGYjsHdzC6zYgbqms7Naa2pwcFBsHxwc9PZ1dnbSpUtyclypVGhkZMTTAQCAegf2DkwGmNiBumbJkiXU2dlJe/bs8bal02l6/fXXqaenh4iIenp6KJVK0cGDBz2dvXv3kuu6tHHjxmkfMwAAXA+wd2AyqFtXrKGq/5ya5XO9Sba+Aq/yNXpaomlLK2dXdYZlZtPHbl/hyavuYPfr6CXpAg5UOJts6QLOGHMNmdHameTsKj2LK5eSLoxShfeV8/LP4BC7At4/f86T3z30ptC7YxMfs7WTM3rT4/LXnE9LLmtbzC4XtyabzCmxy7WiuazHhlJCrzhePWCxPDmV2DOZDJ08edJ7ffr0aXr77beppaWFFi5cSI8++ij97d/+LS1fvpyWLFlCf/3Xf01dXV103333ERHRqlWr6FOf+hR9+ctfpieffJLK5TJt27aNvvCFL1w1QwyAeuO2m1ZQJBKhm26Trtj8Gna5RuIcUyGtDpEytBAVzcXYEpErOEr7yOuffteVR6zoGaCaDS4WZejJshsWenLIz7Yln5WdgJSp2ThD2juldYpwFcuOIUNFXC19tpTncTiudCObth6uw1c5flmGspw9zXFmd378Nk/OlWUlhvCEa9dQk9NpB/YOTDV1O7EDc4c333yTfleLb9y+fTsRET3wwAP09NNP05//+Z9TNpulhx56iFKpFH384x+nl156iYJBjgP6t3/7N9q2bRt98pOfJNM0aevWrfSd73xn2q8FAAB+HbB3YKrBxA7MOHfddRcp9eGJGIZh0GOPPUaPPfbYh+q0tLTQM888MxXDAwCASQP2Dkw1iLEDAAAAAGgQ6nbFzq045Fom5Ysy9sOvlRqxbY4lsUwZw3ZDJ5cMCYZ4/rp4kSzMeMvHeUl83sqbPfntA08JvYXdfLzOm9byeNpl2QE7HPfkXIHj9PJpGbcxeIHjO0YHz4l9jlaSIBTl5fe2Ntkpov/CW57cMW++J1dyNSVi8lxx3MiO8nmUjJfRY11CAT6Xv1OeNx2oxpoUSpMTcwLAXCcYiVAoEqGmoCxJEQlrJtrmkk5uzYKPocfYabJb09XGLbvaPj6IURNvW9Gi+PSqSMqQek0JLsFScfg9jivLT5HLB1EkY3NN/QQOy44t7Y4i7aIrWmkmVx4voJ3b5/B4IwU5JjXI9m/oFGehLli5QOgNmxP21Jy95Z3A3AIrdgAAAAAADQImdgAAAAAADULdumJ9lk0+y6bRmm4LToGX6kNhrfF1zTJ5Uitx0n8x5cnLPvYpobdgrf6a3a3l8azQi0fZxdq+4lZPztqyefbht37hycU8HyNd04x7+Hwfj92RbuRgkP8s85ewi/XmFbJ7RcXiNH+flWDZX1NhvcDdJnJnuZK9W6npKKFN8zMWuy3CrbKcQEdXtbRKvjA55U4AmOs0xZop2tREypLux5xWdkhpTdyLNR10shm2NSWtg0yxKG1BpcLu0rJWxkTvOkNElMux3c1lOYykUlMWJdrCdjEaT3hyItom9IJ+vyc7Nd0ryNC6SGjdhKJaGAoR0eVL/L6C1pHI1Tr1EBEZxOdyHb5nsah0cy9ayG278jm+f8qVJbHi0ar981k17mUA6hSs2AEAAAAANAiY2AEAAAAANAiY2AEAAAAANAh1G2NXKhTJdB0KB+QQjaCWym5yLIRyZFxEqIn1PvNHn/HkO+75pNCLtXGcxeCpo55smfJ4qXFukTN05rgnXxiXcWY/feEFT24KcbxMoShLkHR2cGxKLCpj2E6f41IoJW0cLV2Lhd6Ktev4hcPxIyMpWT4lp8Uljub5eIaS97
aQ5/iZjFYKQWUKQm9VYkK/JlQGAHB9/OjFlykYDJLj+5nYPjrKZTgyY8OeXFt5Q4+50xvIOzV1UVrak57c3MZtCAOWtAXZkZQnn3iP7WI6I+1Y95JFnmz52N7Foq1Cb8kSbj22oFu2OVuylOOIWwJsq6JBGW/oai3VSIt3K9fYfkvrNWlpx+tYXBP3F2ObWVZsxy2/UKOWlup5AwE5HgDqlWtasdu5cyetX7+eotEoJZNJuu++++j48eNCp1AoUG9vL7W2tlJTUxNt3bpVGBoAAJgNwN4BAGYj1zSx27dvH/X29tJrr71GL7/8MpXLZbr77rspm+WMoq9+9av0H//xH/Tcc8/Rvn376MKFC/S5z31u0gcOAABTCewdAGA2ck2u2Jdeekm8fvrppymZTNLBgwfpE5/4BI2NjdE//dM/0TPPPEO/93u/R0RETz31FK1atYpee+012rRp0299LleVqlXTa6qKG1q6fkVxur5hSJdDMMDL9reuY5dlwCeX04+8zd0bRi+878nFonQ/jo+OeHL/ySOenFEhoedz+H1NWqX4WFC6W9ub2RV7cXBA7KtoZQhy4+z66D/dR5LDPI4MlyQI2vJeVALsfrlc4fsSCslyAuEoX0vIZjfFeC4tjzdRDqDiotwJaFym09795Gevk237KLFgpdiuHP78v/XqTzx50QLZHaGtlV2f58+xPan9jIZbEp5cMtmWDmrhH0REn9zQ48m33nyTJ+dq7KLp46+Q031nPfnEe+8LvXcPsZ1NxJvEvq2f/0NPvvOmFZ7sV3LdYcE87hpU0lyxhik74OgdNcpalwvTrulQkWD7F9I6b7iWjDG58o1h123gEgCSj5Q8MTZWjTtraanWcjt48CCVy2XavHmzp3PjjTfSwoUL6cCBA1c9RrFYpHQ6Lf4BAEC9AXsHAJgNXPfEznVdevTRR+nOO++kNWvWEBHRwMAA+f1+SiQSQrejo4MGBgaucpRqHEs8Hvf+dXd3X1UPAABmCtg7AMBs4boXl3t7e+nQoUP0yiuvfKQB7Nixg7Zv3+69TqfTE8bOJSKX3IpcFrd93FHC0TonlEhmRnXEuRr5f/5wtye3dBwWekl9eT/Hma8+n6xS3hRhF6ZtshsgUuPa7UyySyQ/PurJIUse7/IQZ7iVS9JFEA2yS7SkZaG999abQu/isROeXKxwQ2vyyQrpjj7eBZpLOCLvrRlgN0tQq77eTNLdvOqmJURElMuXiehXBECjM9X27r7P/58UCoUpkFwu9HPjPEF8713+rM3rlBNCU3MlhoJsq0puXuitWMPHb57HIRq5Ntm94ffv4VVIPUQjW+OKdTUvaEWxa7dQkXqXLnEoy9nTF8S+cJjHO3DusiefOfye0DO1DjqnBi558oa7bxd6ixZ3ebKeMWsGa9JdfWx3Db3bhCHtsd+oXpffV5OKDECdcl0Tu23bttHu3btp//79tECL9ejs7KRSqUSpVEr8ih0cHKTOzs6rHIkoEAhQIBC46j4AAJhpYO8AALOJa3LFKqVo27Zt9Pzzz9PevXtpyZIlYv+6devI5/PRnj17vG3Hjx+nvr4+6unpqT0cAADULbB3AIDZyDWt2PX29tIzzzxDP/jBDygajXpxJPF4nEKhEMXjcXrwwQdp+/bt1NLSQrFYjB555BHq6em5pgwxAACYaWDvAACzkWua2D3xxBNERHTXXXeJ7U899RR96UtfIiKib3/722SaJm3dupWKxSJt2bKFvve9713zwFzXINc1yG/LeLGgzXEcpKW5K0uWE3FLXDJkeJjjVDJDMqg5VOasNJf4XC3NsnJ6oqvdkytO0ZPPX5DHU8RxGKbJt7dUqamObnBsXiQYFvu0ii5k6S9qSro4JY4JNLVgl3RuVOiVAhxnE+3isWdDKaE37nLMXSHLi7mtsaVCr20ijjCbResJ0LhMp70L+EwK+E06ceyQ2J4eY/ui9DIeJfnZy2S4tp5hsC0I1nRLKOe4LNLYEB9vsE+WO/nxf/7Yk0fHtfdkxoReNMbxcfHmFk+OxKS7+dw5jqtLts0X+4IxjvX72Y/4vCPvvSP0HM2mnxzgItDnsuNCb/kqjiOMx9i2xrUSU0REoTCXO4lH+D75gvI7JxyuXktJt8UA1DHXNLHTDcuHEQwGadeuXbRr167rHhQAAMw0sHcAgNnIR6pjBwAAAAAA6oe6raVtGgEyDZuCAVlqQ2llTSIhXmaPRGWD51yZU+Nbo5zmbteURSmN8ZK+a7JezieX3Ts6OHDa1dwgK2+WFeBf/QkHUpdUzpN9hqyOns/wvlg0Jvb5tRLnlsHjyBRkCYHTF9nlmkrxdRWNrNBrX8Hz9/kJrZSKkun/o8M8Jn9BcxXPl27pfK5aDiCfR+cJACaD8ZFBquRDtPcHPxLb+wfOebJZ5pCKd96pKWys2ZeKHvZhSDv28u69nuzXSjrdetvHhF7JH/XkdJHtwqm+S0Lv8uWj/J4Cn+vCwBmhd/oM691+2zqx7//u5fIvb7zGhZ0rY5eFXrrIYSR5LeTl1JvSjfyzgxc9OWKz+9bnly5WS8tOjmqu2AWLFgu9z279AhER5XIodwJmB1ixAwAAAABoEDCxAwAAAABoEOrWFeuzDfLbJuW05XciIivI2a+u1s0hV5YV1i2tSnjAz+5Hn09mz/rDnCkVj/G+gaFBoZebzy7XZPcNnnz+0rDQu2n9nZ6cGeJMsFMnZMeLbCblybYlxx6Ps2vWIHZvXDwvK7b3ndWyYgM89liHzLJtb9GOp7lzjRF5L5pH+XGYn+QMtwUJ6W4+eaSaqZcvlAlcHZ9t0v+15SaKhqouHtP0kTnRAcQ0bQpMZEIbBlEoGCBzIsP73/e9Q6+8c2pmBg1mjM5kB4XDEVq+WNbKU9rn3zZZtmpCO0yLf6Mrl22fPyg/4+TjTNCuLs5OvWvLFqEWDWvZpEHuSnHkkOw0c+Lk+3wN8xd7ckHJNQNLC5s5dOKY2HfkBHfQCS9e5ckXLshuGM0Jfp30cxhJuEmG64wMnPXky+dPevLQsLTpBUfLMtaqClxMya/FOz5Z3ZfPy3sOGNM0adVNt3sdmyyb7R0ZBpFVtYMGGZRoTpA1se/wO69T3+ljVz0muH7qdmIHwGzGNAxa1hmj5mj1i9Sy/GSavgnZplA46pWliDaFvJZQ+95+/+oHBACAOsUwDIrFWig40Q7Tsv1k2RNxi4ZZndhN2LtksoPsiTjyM+8fmZHxNjpwxQIAAAAANAhYsQNginCVQe6E96y6Ilf9HaUUkVOZcGMbRE7FT8qsuoV0NxoAAMwGTNOkG5Yvp6amaja1ZbErVpFB6krxf4MoHAmTYVRtYSgUvOrxwEejbid2yVaTwkGTypdlynve4TiTrFbVQ5my9IatlQyJxbhch98nK7Hns1w2IOTTbkdJ3po3X33Vk5eu1Kqen5OdJ0ytG0ZYq/puWbISeyjEsS/ZjIyxy+f5daXCpVWaQvIYd9y2wpODWsmUiiVLujhlLleQ7+cYO3NcfqiSYS5xcNuKm3h7okPoHbx4moiICiV5HiBx3WoHFSIiU
iYZE8ZNuYpKxYJXoaLiD5BlXdmH6vZzkdHhUSqEirRp4x1i+x2/8zueHAhwuQ7bks6WK658IiJXabF4JEt8lEtsJ/MltguXz50WeiNa/OzI8IgnnzopQwUuXGL715Ts4h0BaVsMP8fYlSoybvrlfa948qJlaz25u6WmQ4XWySeslWopFmTniVNpjmdu0uyio6S9GhjNeHJb22JPzpXlZ3DvvjeIiKhcRqedD8O0LFq/YSO1tFbLjhlkkDnxQ9Z1FTkTXTuUUlQsF7wOTbFo08wMuMGBKxYAAAAAoEGo2xU7AGY7SilyJ1bgXOWQ61ZXDAyj6o5VisigakFZTw8rdgCA2YYiqlTKXg9jkwwyVNUloZS2YkeKlHKqbyAigxB6MhXU7cRuwQI/NYV8FDfkkv7JfnYfDGpNrEuOdFM2NfGlZXNcFsRxM0LP0hYtR4bY7Tuekcv2hTIfw1IsR5tkSv7gALstzmXZ7ekqmSrf0c7uYcOVZUNGU9xRIhDh60rEo0LPr7ljipqLhWzpbs4WWa+U0TpKuHLB9obuTk/u6uTx9Z+TZQIuD1X/BsUyOk98GEopKhQylLeqhq7iZMma+HsZRJ6bgohobDRFauL5GB8f/8CxQOMTDgcoHArQ5bTsLvPWOwc9OZlkW9ORlJ12ymW2IaOjKd5R063G1mzN/CXsOu1ulrbl/Anu3pDNsOs02dEp9MKtCU+2guz2zOXleefNW+jJAxfOiX3Dl9mezuvi+BqjpldvpqjZSZvtYtmVdiighbkEtLIwpctDQo9MtoUdWqmWUlG6XK8M47doHTxnUcql/jOnKHUldKpSIXKqfxdDKbLUlR+1BrV3tZFlV0MEDKdw1eOBj0bdTuwAmO3o3wPVhvLK267IJYMMUqoagyL1AABgduG6iu1XNcC4KiuXXOWSQUTKICKlCBUBpxbE2AEAAAAANAh1u2IXS/ioKeyj/FBObG9OalleEc60Gh6UmVaFEi+n2352EZRqEptczZ1YdvgYY/lRoRfRMlILOV4+zhdk54mSdjxHk5WS2WmZNF9XLCYrp8di3A0jn2e94ctyTE1N7HIwtKw4oyJXffw2H19PVvPXNMVefMNiPq/W8Hr/fllE8p0T1UbgFQfxYL8O13HIueKOMEz+NauIyL0Sf0JUcSre8p7jwr09FwnYLgV8LhULKbH91Vf3eLIqs92JhaXNKJc5dKSgZdXbNb/dFy3u9uQ1m1Z78rKFXUIv1c/u0oFRtnH+msz8Za3smh0a4jCXtSvXCL2b1q705Gf/338V+2ziLhJlLXylVJJuOlXRPhtBvl4rIMe0eMlST77Uf5x3mNLehbQwl1WruMJAISfDdbrnJYmIqFiE2/DDcCoOvfnqfgpMfMEYStFEBSfy+33UnKh+B1uWRd3L51N44rvbF/Bd9Xjgo1G3EzsAZjtKsWvCdZXXaUK5RGqinZFSihzdFUuYLAMAZhdKKRoeGiTfRHy3SQaZEw7XQDBAllWNr7Msi0zb9CZ0epkeMHngroIZZefOnbR+/XqKRqOUTCbpvvvuo+PHjwudQqFAvb291NraSk1NTbR161YaHJQJHX19fXTvvfdSOBymZDJJX/va16hSQZ09AED9AHsHpgOs2IEZZd++fdTb20vr16+nSqVCf/VXf0V33303HTlyhCKRqqv5q1/9Kv3oRz+i5557juLxOG3bto0+97nP0c9//nMiInIch+69917q7OykV199lS5evEj3338/+Xw++ru/+7sZuS7XVfSrsykKT7i7Tcsk02D3q1ZDthp0POGLvTQGdw8AjUqj2jtFisbGx7xC6yYZXoKEnfdRya2GOZmmSa/8/FUKBqsu23Pnz8/EcBseQ9VZGl46naZ4PE5v/c//StGwjy6feVnsH9NKdKTzPC9NXZYurPSoNmd12j0xEpTp+o7eyaKY8uTxnOx4Edbi1JrCHANXVPK8Oa2LRLnI+wwlF0cjAc4LamqSJV1srVxJ2eEU/4uDNV0utPiEeILjCG2/X+ppVdqHszy+8bQsrfGpzbfzPq2tx3//Hz8UeoMTYX+uq+jsaIHGxsYoFovRZDA0NETJZJL27dtHn/jEJ2hsbIza29vpmWeeoc9//vNERHTs2DFatWoVHThwgDZt2kQ//vGP6fd///fpwoUL1NFR7ZLx5JNP0l/8xV/Q0NAQ+Wvux9W48tyB2cNkPnczxZXn7v/5zhMUDoVoMCUn9heGOb7NLfFn0irL1RlXs2PK4lgyy5bPflCLS563ZJ4nR0jGdo5oJY4OnePY3ldfe0XoXR7iEiJLl3Ac3fo7ZAeNiGbjfvwfPxD7VJm/gjq1siOmJdcdXIev2a91CbL9Mk5r5UqOsTtz7G0+jyM7/Lxx8C1PvvljGz05r7c0IqKuZPX7o1Qu0bP//izsHZgRruW5gysW1BVjY9WaVi0tLUREdPDgQSqXy7R582ZP58Ybb6SFCxfSgQMHiIjowIEDtHbtWs/IERFt2bKF0uk0HT58mK5GsVikdDot/gEAwHQCewemAkzsQN3gui49+uijdOedd9KaNdWsuoGBAfL7/ZRIJIRuR0cHDQwMeDq6kbuy/8q+q7Fz506Kx+Pev+7u7qvqAQDAVAB7B6aKuo2xy2ZsMlwfkSWbBDdF2FXhC/ESfqSm6XQ8zq6JTDqvyTIINZPTyp0UWI76W4Ve0MfL/ZUil0WxbTk39msvfVrTbsOQemGtM4ZZ81eo6C6HEO+MJcJCb2SEXanjmks41iLHnqtwjZf3zrCL+di7/UKvo4WXeTsWaOcypbu5baIDhuO6dHZ08mLCent76dChQ/TKK6/8ZuWPyI4dO2j79u3e63Q6DWMHZoxIxEfhsJ/iNYEx0XYuw1HU7E6w5je532D3mwpp5Y3C0i3nFriUx/g4r9pYYeniSS5LePKyMLuD3zv9vhygwTbOF+aQj/MX+4Raa1vzVWUiolKeXZ/FInehyGalbSlqZUjKRS4DZQelXezo4tCbsxfZ3g/2ybEXMnyu9w+/zeNrbRd6qrm6mqbKk5uxDnsHpoq6ndiBucW2bdto9+7dtH//flqwYIG3vbOzk0qlEqVSKfErdnBwkDo7Oz2dN954QxzvShbZFZ1aAoEABWrqXwEAwHQAewemErhiwYyilKJt27bR888/T3v37qUlS5aI/evWrSOfz0d79nCh1uPHj1NfXx/19PQQEVFPTw+9++67dOnSJU/n5ZdfplgsRqtXryYAAKgHYO/AdFC3K3YX+onCQaJiSrpYo+3spgyGOGM0Lj221NLCl5bJ8rJ9KiU7WYxe9msyb7dcWaXc1ZKHr3QTqO6Q2WT6TNkwOfPVsuWtzjusqWrKD/m0Rt2V3AifNy/H7mjZs6kM7yvVNC8Y0VzRZ07yRaYuy+yvUpbf2BnnX36rFs0XelcOV3Zc+uWZEfoo9Pb20jPPPEM/+MEPKBqNejEi8XicQqEQxeNxevDBB2n79u3U0tJCsViMHnnkEerp6aFNmzYREdHdd99Nq1evpj/+4z+mb33rWzQwMEBf//rXqbe3F79SwawglzlJ5ASJ
XPlb22ewYRscZNfhe0fOCL2glrXvjyc8uS0p3Z5dbZwJaWvFYVvjMnxDbypT0LrwJJPSZTu/q8WTL2rxXSdOHBV6i0s8gdFdykRE4+N8Xbkcu07TYzLAX3fFOiW2aVYgIvQOH2rz5FKRw1CSSRmXNv9m7o6RbOd9be1y1Ss4cfzCJHSegL0D00HdTuzA3OCJJ54gIqK77rpLbH/qqafoS1/6EhERffvb3ybTNGnr1q1ULBZpy5Yt9L3vfc/TtSyLdu/eTQ8//DD19PRQJBKhBx54gB577LHpugwAAPiNwN6B6QATOzCj/DZlFIPBIO3atYt27dr1oTqLFi2iF198cTKHBgAAkwrsHZgOEGMHAAAAANAg1O2KneNrJccXoLL/drG96HJ8hlnhNPxg3BB6iXaOzWs2OYitJSdT1lMjHJuSGua4unxW3hqnopUN0LpIuBV5vEKe4zD0CuCWLWP2xgv8vnxGxm74FMeFRM0on8uUMSflMo8xEOFfgkGfjLNI+Pl4SynhyWtvkbEpK2++xZMX33CDJ2/YJGP7zl2oxroUSxWiX54hAMBHQ5WK5FpEZs1vbbvMdiPmY5tx8LV9Qm9gkG2hoX3+N2xYJ/Q+3sP29EpxXCKid375utDLFtgmnejjskinzpwRevkc2wal2AYHY7JkSFrrcjM+Oiz2ZdMcw6dbcduSNj0e5bImXVrSQXPrPKGX7OIYua7b1npyS0zaO7/eoUOT9RIuROTZe70jEAD1DFbsAAAAAAAahLpbsbsSg5ArVFeZ8oWS2G/4OGPUdXklzszJX3d2lvXI5GzPbF6usGXzrJfTV9EKMhbCFZmrv2bFrsjHc7RfsJYjU1XzRT5+oVQW+5Ti17a22lgoyfTZov7S4ONZSv7iLGp9JUsVHoevpt9kTrvXGa04aL4ox1ecGMeV49ZZu+HrohGuYa7RCH+zK9eQL1Q9EeWa39oV7bNcKLC3wnGl3dGz9g2tWHm5Ij/jBS0jtahljBZL0s6WNJtU0Y7h1pxXaa/1FTu3plqAq/WiVbXH+JC/Y+1m/dx6ZYJKzTWWy9p1addbKNZUOjCvbcXuSlZsIz13YPZwLX8zQ9XZX/jcuXOoiD3L6O/vF0U2ZyOnTp2iZcuWzfQwwDXQCM8d7N3sA88dmAmu5bmru4md67p04cIFUkrRwoULqb+/n2Kx2G9+YwNzpf1Lvd0LpRSNj49TV1cXmebs9uqnUilqbm6mvr4+isfjv/kNYFK5lme8kZ472LsPAns39biuS8ePH6fVq1fX3X2eC0y1vas7V6xpmrRgwQJKp6uJArFYDA/dBPV4LxplEnTlAxOPx+vuHs8lfttnvJGeO9i7q1OP96KRnrv586uF5+vxPs8Vpsreze6fHQAAAAAAwAMTOwAAAACABqFuJ3aBQIC++c1vovcd4V5MB7jHM8tcv/9z/fp1cC+mB9znmWOq733dJU8AAAAAAIDro25X7AAAAAAAwLWBiR0AAAAAQIOAiR0AAAAAQIOAiR0AAAAAQIOAiR0AAAAAQINQlxO7Xbt20eLFiykYDNLGjRvpjTfemOkhTTk7d+6k9evXUzQapWQySffddx8dP35c6BQKBert7aXW1lZqamqirVu30uDg4AyNuLGYi8/cVDNZz3RfXx/de++9FA6HKZlM0te+9rUPNH6fzczFZw/2bmaZi8/cVFNX9k7VGc8++6zy+/3qn//5n9Xhw4fVl7/8ZZVIJNTg4OBMD21K2bJli3rqqafUoUOH1Ntvv60+/elPq4ULF6pMJuPpfOUrX1Hd3d1qz5496s0331SbNm1Sd9xxxwyOujGYq8/cVDMZz3SlUlFr1qxRmzdvVm+99ZZ68cUXVVtbm9qxY8dMXNKkM1efPdi7mWOuPnNTTT3Zu7qb2G3YsEH19vZ6rx3HUV1dXWrnzp0zOKrp59KlS4qI1L59+5RSSqVSKeXz+dRzzz3n6Rw9elQRkTpw4MBMDbMhwDM3PVzPM/3iiy8q0zTVwMCAp/PEE0+oWCymisXi9F7AFIBnrwrs3fSBZ256mEl7V1eu2FKpRAcPHqTNmzd720zTpM2bN9OBAwdmcGTTz9jYGBERtbS0EBHRwYMHqVwui3tz44030sKFC+fcvZlM8MxNH9fzTB84cIDWrl1LHR0dns6WLVsonU7T4cOHp3H0kw+ePQb2bnrAMzd9zKS9q6uJ3fDwMDmOIy6KiKijo4MGBgZmaFTTj+u69Oijj9Kdd95Ja9asISKigYEB8vv9lEgkhO5cuzeTDZ656eF6n+mBgYGr/m2u7JvN4NmrAns3feCZmx5m2t7ZH2HsYIro7e2lQ4cO0SuvvDLTQwFgUsAzDT4MPBug0ZjpZ7quVuza2trIsqwPZIkMDg5SZ2fnDI1qetm2bRvt3r2bfvKTn9CCBQu87Z2dnVQqlSiVSgn9uXRvpgI8c1PPR3mmOzs7r/q3ubJvNoNnD/ZuusEzN/XUg72rq4md3++ndevW0Z49e7xtruvSnj17qKenZwZHNvUopWjbtm30/PPP0969e2nJkiVi/7p168jn84l7c/z4cerr62v4ezOVzOVnbqqZjGe6p6eH3n33Xbp06ZKn8/LLL1MsFqPVq1dPz4VMEXP52YO9mxnm8jM31dSVvZuE5I9J5dlnn1WBQEA9/fTT6siRI+qhhx5SiURCZIk0Ig8//LCKx+Pqpz/9qbp48aL3L5fLeTpf+cpX1MKFC9XevXvVm2++qXp6elRPT88MjroxmKvP3FQzGc/0lfT/u+++W7399tvqpZdeUu3t7Q1V7mQuPnuwdzPHXH3mppp6snd1N7FTSqnvfve7auHChcrv96sNGzao1157baaHNOUQ0VX/PfXUU55OPp9Xf/qnf6qam5tVOBxWf/iHf6guXrw4c4NuIObiMzfVTNYzfebMGXXPPfeoUCik2tra1J/92Z+pcrk8zVczdczFZw/2bmaZi8/cVFNP9s6YGBAAAAAAAJjl1FWMHQAAAAAAuH4wsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBD+N0XSy/Q4P/1ZAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAACbCAYAAADvEdaMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABId0lEQVR4nO29e5BcV3X/+z2vfk4/5j0azehh2ZJl/ALbksfmR5yga2FIwMHUL9yiAqQoKIjELaMUSVyXQIVKlerHzS0oiMB1U4lN6sYF5fyueTjG+TkykXGQbSzsYMl6WLbk0WtGo5np6enXee77R3fvtXdLspE8o2m11qdqanb3WX3OPqf3Wb3PXi9DCCHAMAzDMAzDXPaYS90BhmEYhmEYZmHgiR3DMAzDMEyHwBM7hmEYhmGYDoEndgzDMAzDMB0CT+wYhmEYhmE6BJ7YMQzDMAzDdAg8sWMYhmEYhukQeGLHMAzDMAzTIfDEjmEYhmEYpkPgiR3DMAzDMEyHwBM7pqPYsWMHVq1ahUQigY0bN+KFF15Y6i4xDMMsCqzvmHOxaBM7HnDMpeaHP/whtm3bhq997Wv49a9/jZtuugmbN2/G6dOnl7prTIfD+o651LC+Y86HIYQQC73TH/7wh/jkJz+JBx98EBs3bsS3vvUtPProozh48CAGBgbe8rNRFOHkyZPIZDIwDGOhu8YsIEIIzM/PY3h4GKa59Iu/GzduxG233Ya/+7u/A1AfS6Ojo/jiF7+Iv/zLv3zLz/K4u3xot3HH+u7KoN3GHeu7K4OLGndiEdiwYYPYsmWLfB2GoRgeHhbbt29/288eO3ZMAOC/y+jv2LFjizGMLgjXdYVlWeKxxx7T3v/kJz8pPvzhD58lX6vVxNzcnPx79dVXl/w68t/lN+6EYH13pf21w7hjfXfl/V3IuLOxwHiehz179uCBBx6Q75mmiU2bNmH37t1nybuuC9d15WvRWEC8ZcNG2LaNublZTT5uRrLdHROyPdKd0uT6euh1by4t2zHT0eSseFJ5YcnmbGFOk/MDOlY+l6NzC339fDw6l1qN2olkXJMLEcp2tVrWtmVzGXohSM7z9GNZoK/PUvrele7S5NIpuha2k6D+uZ4mJwzlacCkfXueLhcIQ37+r779z8hkMlhqzpw5gzAMMTg4qL0/ODiIAwcOnCW/fft2/PVf//Wl6h6zCLTDuFsoffc//uERJFIpnDz0siZ/5s2Dsh2GdE8OjKzV5EZWr5Pt/OCIbCeSuoo/vP952R5/Y69sByVdB1nKsTL5rGzbcV3P3nL7HbJ91dXUp1pR19v7X/2NbEeRrk/8oCbbB/a/Ktvzc9OanKpbA1/R1TNVTa5Uof0FIR2rr69bk8t30+9CJEr0mUATQ61a/458P8BT//ZMW4w71ndXHhcy7hZ8YrdQA862bdi2rU1YAMAyadnYtmiyFXN0ubhDp5aI0WQuZukTOzuuvLboM9WYLmeadKyE8hkz1MRggCaeiGhjomV/oeLeGIX616DuH4LkTAhNzgLJqdcpGdePlUzEZNtxqN26An++iZ3VItec2NF+Lr+l/AceeADbtm2Tr4vFIkZHR5ewR8yF0g7jbqH0XSKVQjKVRjyR0N6Pxeh+VSd2rXJJ5eEtpTzYtU7sEkl6kI3H6WHTbH1oVI+lyNkJ/QE1labJUZfyw2NH+v5SKTpuFOm62vPpe4zH6XzdFp0pFN1qgPZh2/qxbFs5Z4N0sOPo1yKm7D9U1jhah1UY6Hq3HcbdhcL67vLnQsbdgk/sLpTzDbgDB/bDME0UzpzR5HsUfWb00ou+UJ/NGknybSlHM7JdCvWbVBikSCo1erqrVF1Nzg9JqZxRZjoJW99fEJCcZZ5bOdaPRU/IQcsTrFHrlW1T0YG+q/cpadP5l5TVt5lQf+RMpUj5GsqKpdEyyYViv6/USFkGfovSt+vn4votj7ZLSF9fHyzLwuTkpPb+5OQkhoaGzpKPx+NnfScMs9icT9/NF2bhuy568z2avOinCaOwaeVs2YqrNLlQmUiZUUW2o4p+j9ZmaRVMVGlla3mf7gu4YvRq2R69eqVsDy8f0eQGBqh/jkP3U5DXV/ZGR+geDAJd39VqtOJWmKWVszNnZjQ5O6Yqf1KM3b36fZxI0/7mlJXDeEL/uYsEXRvHpn0U5wqanOfWdXzA+o65TFhwD9CLGXDZbFb7Y5gLJRaL4ZZbbsHOnTvle1EUYefOnRgbG1vCnjGdDOs7Zilgfce8FQs+seMBxywV27Ztw9///d/j+9//Pvbv348vfOELKJfL+JM/+ZOl7hrTobC+Y5YK1nfM+VgUU+y2bdvwqU99Crfeeis2bNiAb33rWzzgmEXnj/7ojzA1NYWvfvWrmJiYwM0334wnn3zyLP8nhllIWN8xSwHrO+Z8LMrEbiEGXMI2YJoG0OIWsFLxq1s1SNGpA/26b0pS9StTnA6rbk2Tq/lKhJoiF1OcjAEAigOtiOgzuR7dlyTw1YAO2kfYEmRhxejEXE/vkx9QP1KKnJ3W+5RQtgUG+eyZItLkAtD+1ECIrrTe91KZfHP8QPHZafHZnC/WI4Y9v+Wk2oCtW7di69atS90N5gpiQX5gfR+wfXiu7s9aqZA/2qq1y2W7VNajWD2fdEhPH+lF29GNMtdcQ5Grd9x+q2wvH9R953K5fuqaTfd5qiV4QnUxNpRw0mq5pMm5ip9uKqnrne48+fetueo62d6//6AmB4P24bqkq3JZPdpViQ/DXJFM5AK6b18UUednZ+l6Viu6L3Mz02sQto+PXRPWd8y5WLTgCR5wDMNcKbC+YximXVj69NkMwzAMwzDMgrDk6U7OR8IIYRoRMhm9i2uX07J7b5JC3p1IN2eWZmjZPYxo/lptCf83lWX7bJ7yP9kx3eRQmJunbUqXejK6WWG+SEv6npLSpFrTTSxCMY92KbmgAMD3KFzfVPJJOS3h6qGSHNlWbKxuizknptgmzIjO3y3pSUShpIKJK2lWgkg37c6V66YKL9DfZxjm4ghqNQSGASPQ3RviMXK/mFNSP/UO6abTFe+i9CQDo8OyreatrB+IdIOWGPiUngy48sYUyZmkSw++8l+a3G3ryXT6vg23ybZoqVRZLFLC9/E3T2rbYkrS9FiMooT7+pdrcuPHXiO5BOndUkuC92KRrpPtkF7MZnVdXa2SOVe1sgYtek3m1tNPiWHaFl6xYxiGYRiG6RB4YscwDMMwDNMhtK0pNh+3YJkmki3mx5wSGdqfVUrCRLoJQ31l2Ypd0dTnsq6SsV0tRWO3RJaGLplHhUX7OH26oMspkaLzFVrqr4R6RFZXUklM6up9t5TSOaZB6/9WXC8jVC2TKSXlKPUcW8wgNaWiRlXJnh612BYKJdpfoULXpdSavd6vn38QsimWYRYCt1qBISJ0JfV7PNtD0anvuelm2R696hpNbl6JSD34xjHZLio6CABKh
YJsTxfI/HpqQnfLyCpRsTApSvTxH/5PTc7576QLf2fsvfS+o7uDDA2ReRhCryZUmCU3l1+/RDVlbUfX/ekM6bhAcRvxSgVNTlHP6FeyJYQtOnh6hvphQqmnbes/i/l8PcrYb6nAwzDtCq/YMQzDMAzDdAg8sWMYhmEYhukQeGLHMAzDMAzTIbStj11fLgHbMpFxLO39RIJemxb5WSRbKkX4StqASEktIoTuZ+EpFSVCj3woItGSnkTxzxA2pRCY9/RQ+zCk/lUUH7RWf7T5Mu3/xIy+D8ck2WyJ+u5P6L4p1Tnyn1nRp6Q7GNBTIRgZSjXgzpJfTamkH3dunnzszsyRT+HRY3OaXGjVh00kOP6fYRaCeNxGPO7AtzLa+9UkpWA6UqR78uVnX9DkZqap0sOJk1RtwbH0sjGqbnED0mmqHy4ALOunn4bTE2/Kdjaup0+ZLxRl+9CRI/T5ZX36cR3a37LRIW3bsPJ6fIL8Aw++ckyTG1hGfn9HxxVd6Ou6NfLodahUzUi0pLCK2+SjXa2RXDab1eRsu/45EfE6CHN5wCOVYRiGYRimQ2jbFTuGWQoss15b2GjdoNQRNrRo4lZJpabwWyxoqvWLz6LxwbdaDxUCEG8lIZrH0fuoHlVdcW3dk9Y7pa+mYcAw6HlQTUTbcij1Y5pca/Jacb7LKc7ZhBACES8WM8w7JhaLwzAM7R4UEC336Pm0gzjvParTer+f+/4/S1e1qEhD29Yi13htaNsa9ebP8ZlWQqWYexSFLTqYzle7TmcpeEUXvpV+Pw9n7c6g40QXmIGibSd2Q30pxGwL2ZieaqMrRaYAQzOX6lfFUNKVuEqGcbPlh7g3QwWz02lKNVCc082eOWV5fl6pIvHmCV2u5JIpNqZ8F8tT+qW2HcXUOV3QtrlCqaihpDvJZXUzzR3XURHv4ikamKKiX4tcH5kc3Ar1o1TSF2zjDsmNDtGxBgb0YuaTxbrJNggjjO89jk7iwxtXw7FNJKwItlm/jrZlIB5rXhuBmAjRHG+2acNopNARQiBSXABqbk3e/GqlEdO0YTUqChiGgUQiJhWBWyvD9135mRCKad8L5M0/X6mgXK1/D6EwZAoaALAjUkUZx0DaNuWxLMU0d2zeh9twRQhhIAQpRwu0j1QqCauRQ2Iw34eR/nrqChEC5ZlI3nrpLiGrsjjxALF0/T6JogizxTl5LeZLNfiNtEB+GMKVKXgMGFZMHjiIDJn4x/UjFBv3XRBGeOHQBDqJZHIAyWQKpwu6vjt8jMyRr+7bK9umo+uTUKk2U50nFwvL1H8Qqi6ZTgvz1J4vlzS5o8f3y3Y6Sbpg3Zp1escVc+5//uI/ZHvl6tWa2Np1a2W7tzenbYsn6FxyWTKXmoHuAlJ21QpClIKlWpjX5MKQXEoSSdJppaIul1XSp8QVFx/P091wKo2UMb6vfzedwJ/9n/8DiUQS85UQnl+/Pyu1Gk7PzDQkBMKwCDTuRMOypK4SQiBSrolbIX1nGkI+AEcihOdXGnsDypUKosbvs+t58BvVUCzbRirTdD0wkIglpW5NmTYSZv07ijk2BvJZ+SDZ29sNp+Gylc3mkMnVx1csFsPIyHKpT7KZNCzLahzLgNXQi7VqDfv27pV9P7zvVyjP18eeLwx4UX0HQSgwMe1KHewVZxE1xr8QPkRUH5OGaSLdlVQmdyFdP9OUfRAC8ANT/jaEQSAftg3DgNU438AP8O///p/n+QbPTdtO7BhmKUglbMRsC0k7hNOY2FmWiWS88QMhBGLKs6NlWvIGjKIIUUiTvJhhy1U19cnPMC3YMUfe+MmkA7OhwKqmB98LG59pTuwMCCFgmvRUFwaWPFb9Yc6o/wnAFuRjkXRMdDn0g2jZRuMJXSBhmzAaDw6BMBDKTwnYoCfmhGPBbuSCTCdiyKfrOb+iQMAqR/IzXUmg+WxgJ3wkuuqfCcMQoU8/sCII4DcmmH5gwFafLyxbXhc/Iv9Y0wzhNZbpOH8iwywM+XwPkskUzFgItzGxMysVlBp+i0JEiEIBqcEskx5kowhR4wFNCAEDtlRQhhGhuVgWRgFMnyaDfhTJCUxkmICcbNlwEjQhisfTUi8mTBuphm93zLaRyWSkXD6flz6cuXwe+e562dFYPIbBoSEpl8t1ST1mWQbsxmSwUqlgcnICQtRXKqeyGdhG/by8CHBFvQ9+ECFZteV5WH4VUeOBOopMWnk0DaRSCWViF9D1MwxYdnMfgBeYaP4u+H4gJ5f1h3BLvn+hsI8dwzAMwzBMh9C2K3bdXUnEHQu2V9DejysmiFScsoW7VX353FeK3efz3bLdahf3Qprb+r5SyaGrS5M7OUVL/6+/SSaCqXl9Nq0WaViZpOX9e//bzZrcyDLa/7/seUPbtvswmZmCiEwdTdNgk/kCFequlKh/mYyjySEk81siQdtiCT3iOGXQtkCpir1CKSoOAJmZuknD80M802Gm2FTMQcyxEEMIs2EusA0DtpLOPmbZcs0uisjfKwIQRI0ldxhItERqq95ooumnZgBRFCBqLPdblgmjUW0lCAXKill9esZF0PAFKVU9lBsVSyJhoBoY8gg9dmP1DsBQdxZrB7sb+zaQy8Ya2wRS4zPSrWCm4mG61DQBi7qdtYFpCFkBJQo81Cr17z8KBTyXnjKTSRNm4ynVCgXQuBYQERzHkk+0tm1KU4wwTMQMS14fobhQpLNp2I3IxZofIFau90+NeO8U8t29SKbSOHzskPb+qaMUaZpy6B6fK+uVIkrF07JtRHQNC/O6ibVQJR1nx+l+7xsc0OSSiovK8lU3yfZoi8448l+7ZdsySFf5of4dTZ2haPwbblivbbv6mqto/0rka9ft79bkfnNgXLbdGrnNuE5LVCzIxBoJ0mMTEyc1uZhS1SjXrZ6/ni2gWq27zXSiKTaZTCOZSmNmfhbVWn1sVMolFM7UXYwMCNhGBWjc/37oySpPURjAq9WvlRBA5Ptyxc4LfOm3FkYRKh6Nu0BEkK4sMUeuYBmWgVii8ZtuALneYVhWfYxmbQvphg4WgY/Z6SkpF48ZcsUuDEN4jQohtm0jUHTFyMgyxBtR3emuFDINs68BA93ZrobeA65aMQy3Unc/KJZrmG5Eo/uBQDlNeswTHqKGGVnAR9TIuGEYgO3QSlyl4iJoyJmmCatpcTAMxOIZKVc3ezdXSgV8v76/ixl3bTuxY5ilIB6zEXcsmL4Bs6F8LAOwlFJ0tkNmVN8LGqaKhtNxQ0YAiMdicqKjPlBEAgik+VQgiiJp9jRMU04iQ0QIwobiEECp7MtJTakWotownYRCwA1pMpeLG7KsUj6Zwqr+XgCAY5sY6EtKU+zJ+QCFal15BFEJc5XmsQRCqBM7SLOKiEL4DSUdhQKBTz/mUWhB2E0zDQDR9NkT0kcPACzbhNUwbwgDsJvmaiEQKlER6WQMiUT9Bzzm+vLaen7nTewYZimIJxKIJxKNiUT9/ndrNVRLTX9EgaTtSf1UrZXhN1J/BYGLWqWxyCEEIMiTuFKrwWtMSPwwRFnxW7Ri5G6RznQh2VysMQxYUrca
SGa64Tj1+7/LNpBt6Ba3UsLsCVoMqWSTiDV9oA1T+uWapoUgjOSxMpk0ko2SfbZtQ67dGAbSqUTjNAQG+roRuvVJvzNThNdIn+MFEVJx0XgAFbCjJEToNK6SDwFH7kMo+jMMIzk5M03yqTMME3baksFoobKYEgRBPYgDkP8vBDbFMgzDMAzDdAi8YscwCo5tw7EtGJEFNEwJpkHL6gAaq0pCtsOG6UtEkE+LBgxEkWguWkFNDRDJfdQlnUZAQ12KVv0iAVRrvnzfDyL4gZDb5POxYQCNpz4D9ZQtVsNs7xgRHKP+JOjAhBn4MGBAQCBh2mh6CySdOBK219i3gKuYMERE4fau52G+ESUoQgHX9+V5JYMYombUbWTBicjRWIsKtkxYEUXqqtkTfCUwwkAE0XCpsAyBhNM8R851wjALQbU2DxghKtUiytVS470SXJfM0cJ10bzJg4hMsQaAVGNFHQaQiicauhIoVV24jVU6P4xgVclMGU8mYDRMAJlcFql0GgBgx+Lo7msmqzaQSHTBbJhiLSOEEM0VrAi1Gpl250tlaYqNDBtmo4CA4ziIxequJwbqeiwMaB9CNa8ISvFSK5XgNt1NPB/JRkSYYwG5rC0/NxdWEHh1/RSEQllxE/IaAUAUhtLEGkUCUUP3G6alWWuE0gchhDRlR+GFr9i17cSuv7sHiZiN6kxNe980lHQdFSXE39Pt0LahpIlQTDetS5RVxZSU7ybfDC/UfzzeOE7+GTNF5UfP1jOxqyanbILkBmw91D4xQ/4y12T1TOynemgfkwXynXErenb4lw6RP44Z0A+in9YzpyOnpCsxldQCuZQmllHMYDVl6Vx4RU1uVX+6IdN5Pie5rgwSMRteKUDoNaLELBuGMnJqLn2vXhDKSZoBAaspZxhwFTMAGUrrPieeMoGJpbpkvqW6a1pDwQQRTp2pX3shBOZKIZofC01D+S4NWGZ9umMYQDIWId44WMbxkDfrPiIWDMSKXl1IAIOxLnQ1jhv5cbherNG/ENPzDSVjAJEfIWqMr+lqEdMzjfEgBIyQpmxhMoVEWO9T2kgCyeY4FIBhofnjkEjEEIvR/Rkp485VIsNswweC+hhLWBYyubp5pBPH3ZEjexBPJHDg9cPa+ydPvS7boZLGJJNLa3Lrrlkl29evv162T01VNbk3p2gf/UOkF1au0dOTZHrJ52xyVvmRP3NEkxt/k/zepgrkR7f+Ok0M/9ta8qsrl/Q+qZYm4ZGO2/fcbk3umnU3y/bg8rxsP/fCM5rcxCTpK9U/qVbV9efsLOnkZBftLxK6z165Uj//oAN9OydOHUI8kcCxk6cwM1c/z3KpjOmZup+3EAJRuSIf3hLJOJxY/R5PpeJYPlwfQ5Zp4Zo118Bu+MvNlXxUqvVr7/oRJgr179wwDPT09cJsRHz29Pch00jjZTkxpLJ52bfpYlVGwFuVWaBaAFB3fzk9Sb+Lc+V5mI3f3b7+QfT314/b1dWFa6/tkccNgkhWnvK9EM2vOQoFhE9uKKeOvIH5mboPX7a7H8ub6Z0MCz2DGflY+eqBEorF+v6q1Qjl5gOvENpYcV1PjsMgDGXbsix0ZfPyWkQiko/1QRig5jbSil3EuGvbiR3DLAWGQatnehLMs33lzrODiz5u4wC/5T6UlS75DgVQqLs4O9EnBXKc6xzV12+ZBPlCOc/1bL2250vuea7vhWGYpeNcOuPt2ufbZiivz60L3qFuvUjImkKv305XLTU8sWOYc1A3EaoTiWb7LT8FPVE2GQ21LeeYdUmrgLJid3YG+HMfqd47qRb14g3K/mDUTbiUVf3c+29aR0Vzb8oEr/UMm/uh4ylb1bYhzv7gBaKaKRiGeedEYVg39am3KvTJVj2Cv2G9ME2ZW86yqG2aZqNiDwUGSMeUVv35lkq0pQpG84U4W1eJxkNwvRIN6QapP1vkTVN/aD9LjzVz60X1gDagkauv2TaksQOGYTSq8Jz7d0E/tnGOlvoh6u+5KvRcjL5r24ldvrcPybiD7i49ZYRpUoh+oUgh/35L5nRTLRECWloXLRnbu7oobN4Htfe/oacdKCs+B4kEhcknYvr+kmkyb3ZbZAbYc3hSkws8+pyb002x/d3UD0MJ3fcD3Sxd8cikoabF8ALdVGUo5mZ1ZDmmPhqFqVS8sKl/gevqcs0o0LDzfmAz+W4k4w5EOgE0fCYMmDDMZkoOgVK5RGk5XFdebwN6qgmhjLvQMqVfiWGYSKVoHEdmQibinSpMo1AsNnbtoap8x7bjwGo47dm2ISN1DdNETBmT3VYIu5GeoFj1sW98utkhiMBuNlFJ9iBsmHOFKdCTrZtig8iGgS5Khhx58nyDKITfOF8hgFqNxkAQhTAbt10YBECgpCBShpoN8j0ULf6LtqWYaAMlYacAOTB2YH7iX/3nz2E7NuxBvbLDmvU3yHZSKW6//rprNLl1a0dkO6zRNRSmbvYsgyrl2A7pGcvKa3J+QOOpPD8j27kWM3ig6IDx06SPE10nNLlctlu2r1qzStsmFDeHaoGqBB14/mVdrkrnf/3mD8j2DTdepclVXyRT7OuHj8p2KqWnsMrle5VX9HtRVH5XAMB1633qRFPsKy+/AMdxECZ7YcXqv13pfA6jyWa6G4G4F8mHtKFlA8hm69cxnUpgZKSvKQZTxOXELpbwUWrk/qr5Pjyz+Z0YcGKUvNeysjDNuinWMGwEYV0HCSFQrcwjaOiauBsg3nCpivxQGXcCU3MlNF/6wkGzCEufH6ArnW24uRgYWjaIeCPFT92c3HiojgzU5mty8nji4BFMn6pXfOnuO4PKTD3y10kkseLWDTJB8+rVA6jW6n2fmp5BdKxZGSfAyROTckJoWjYSDV9BwzRhNn5LDNOAYZJiq9XKsupJEATwPDbFMsyCYFlWPeO3ZStPVAbMxgQoElE9ZF3QJK15o0MxK9angMrqmRLggMZn6KnRQNP7MwiFTGnih+RzUX84pf2bRt2vrr47A7Z8GhUwTQNWQy6MBLzG/kTUnKvWjxvEI4hmyTqD/EMFhJw0CiEAQXn36udf/0gUQZuw6Q+WQn86FSTbfOIlaNXSVN4XhtG6U4ZhFhCvVkMUBjDjkXzwNGHCafrACoGYci8nE0mkUnX/zlQqLttCAJFLD2mWVc9XCQBWBJmPDmg8KDeDvQwThvSHNxt6rjnhUmqzqsWhW1b0gpCWboIwlLk+wzDSVhgdx4bj1CeOzcles+8iEnJi53s+fLfhi+d6CBp+n6ZlwTIB06qvEjqOjVA0Joq2Jf38jMhsBGc0tbsh+2CYpvRDbLUs11fsqOKHYsfBhcLpThiGYRiGYTqE9l2xM23AdGA4znlF4koVhRT0KDFbmbOaSnJZv8WOE5dLzsCZCYqSqpzRl+Ov6lEynSsW0URajyxdt2Y5HVcRDCz9PNTlftua07ZlYnQuvd1rZHvNNSs0uSPjv5LtA4fI9BGzW0yngszUQUBfudkS0evEFPOgYlKMWh4t1KetTuNffv5S3RwYBbJyQv0Jkuqoer4nn8aCMETYNMsKwNBWmKhtKE+
IhmHQKh8ANOrBAkClWkGtYfoOowjVmqfsRk0arPuLWDaZsJIGGbdStkEZ21E3O8i9OfNyFTGAgN/06YgEXCU6MVKqUERRpJ1voNSRTBSrchUx5thIxhsmvNYn05YnUNEq0Hxffeo1DGkO78RasVMnpmFZFt5904e09+NxqsTQoxR9WDasR77PFEh3HTtMplMvimtypkHfpWUrrgJC1xlQ9ETokjlXtFz7rlyfbE+XyF3FjOn6ODrPfVHfqOwvQee1anhUE0tY9DkTpNNuuF6P6M3n87L9k+r/ku2JU7pOXz5AFXVCg3S10+KuU2y4RtSjGXUXncudgwcO1f3jYscAi34PmnrCQH3Frcnhgylpzqwn+aXfRaFUcaq5oUzNFIQRyoo7Tz0KtL7/epRt/biGYcCwqSZ3pVqV48YOXZhhM6K1htOnqOKRF0WIGmNqfuoUJhopWJKJJObOnGwYXgyku1Ky/qphGDITQRQGKM00XBQEMH5gL6rl+v2UGJ9B8mD9t9V0bOT3HpV6t1wtywpN1WoNxUaVlyiKUJovyVFuGhZZKAxDpoSBAVg23dSe5yqVJyK58qhmDfhtad+JHcMsAb94+fW3F1oyFtrHZ+7tRRiG6ViOj594e6HLiFaNtn/fywt8hH0LvL/FofOWXBiGYRiGYa5QeGLHMAzDMAzTIbStKbZWCwBhwPCrLVso3L5cprB2z9fnqIFJtv9ShfxPihW9AsTyUboEIqBtK/t0v581w+R/VqnRtuVrb9LkYoJ8NWbnKN1DUgutBzBNtvXRoWXapkKZfFWuupbSGmS7dX++bDdlc5+dor7PzukL0o7i72IK8rnxW4oLK251CJWM7eY5onfU/wzDvDOS6W7Ytg2n5ZYqKJVn4j152a4Euq+bUmEJye4MfSZquXlratUc5W2/ooklkoovrqH4W5ot6aJ6yU8tJsi3z0p2a3JCrTRi6McyQkU/WbR/J637ACe76HXgkr6bPqGnkupNk1/iRz64WbZf/K+jmlxJqURRc6dk263qvzn5TB4AZCoKhml3eMWOYRiGYRimQ+CJHcMwDMMwTIfQtqbY0AgRGiZEqGc6V81/yQRVpejK6GbKk0rx6yPHaZndbrF1xCZPynZtkuSuGdDTk7z/LjKJvn6CTA6Z5f2aXF8vVZE4PUUmgnxeD/83I9p/TKn4UP8cRSrZiYJsTxVOaXInTlHIv+PQ+eezupmmWlXKlNg0lzdabKxRpKfTIDl9/t+BBScYZkkZGl0Jx4mdda/VauRuMlkkdR3L92lyfkBmSjVFVLWkV+TxBe3ftsktI7D0tCipLKUdGegtyLaY0c2UnuKyYUS072SypWKQouIioev0UKkSZDpK1QxLvxalMplf1Qov8ZZrVlT0bjLVI9vvG7tRkzv4+puyvffVCTpOsazJxRoVOnxf7zfDtCu8YscwDMMwDNMh8MSOYRiGYRimQ2hbU2wul0YyEUNg68vfpRKFfwmflvDn5vVI0DfHJ5XPkDkimdDnsqeOkKljMEHmjOXLV2py+WHKbu7MK6bOhG6yHblpA22aIJNqMpjS5ELQeZTLNW3bshSZdz0l07uR1otYj6QpIi2TJxPw/PSEJnd6clq2fYP6W/Nass2bZGNNxymq2Kvq5pxmhYrwPBUDGIa5MIRhQRjWWea+yjyZH+OKeXO+OKPJeTW6lytF+ozTcotm0mRy7e8mM2W2R3cV6c/TsUKbqvNU43r/ZlaSDnJDxVWkJco2DJTI2pZI3dBUdJxiis336JG1UUj7VKP2cznd7BszSI8V5guyLXxdj928nnRmPkPX5fHH/5cmNzVZr0pwMcXYGWYp4BU7hmEYhmGYDoEndsyS88wzz+AP/uAPMDw8DMMw8KMf/UjbLoTAV7/6VSxbtgzJZBKbNm3Ca6+9psnMzMzgE5/4BLLZLPL5PD7zmc9oK7UMwzDtAOs7ZrHhiR2z5JTLZdx0003YsWPHObd/4xvfwLe//W08+OCDeP7555FOp7F582bUlKysn/jEJ7Bv3z489dRTePzxx/HMM8/gc5/73KU6BYZhmN8K1nfMYtO2PnaluRkENQe2p1eKcAxlLqqE0NuWnjKkUiKfu+4M+Y/k0wlNrjpLPnYDw1QdYvmNv6PJ7T1OPiKHDlP7jmU9mlyhQNsG11BVChO6z4mnZDrPCz09SfE0+cQllWzny3pajhWSX4hzI/mjVFvSovznEz+R7ePH6LhWTPcPhOIzp2RIgd8y/zf9ep9qCxT+f8899+Cee+455zYhBL71rW/hK1/5Cj7ykY8AAP7pn/4Jg4OD+NGPfoSPf/zj2L9/P5588kn86le/wq233goA+M53voMPfvCD+Nu//VsMDw+ftV/XdeG65JdULBbPkmGYS0bgAQZgR572dk5RV6M5uj+vvSqvyXUpqZ8sRUeWiwVNrlYhvZhMk25Zd42uW0ZXjsi26ZC/camg7290GVXNWXeEqmRke3Q929NN6VNsW68oESm6RihqPJHWU1gFNaUajvIZpzVFDOi+7u0jv+RSRdfB5QL5Ii/vJ7/me//gbk3uR//67wAWLt0J6ztmseEVO6atOXLkCCYmJrBp0yb5Xi6Xw8aNG7F7924AwO7du5HP56WSA4BNmzbBNE08//zz59zv9u3bkcvl5N/o6OjingjDMMzbwPqOWQh4Yse0NRMT9afqwcFB7f3BwUG5bWJiAgMDA9p227bR09MjZVp54IEHMDc3J/+OHTu2CL1nGIb57WF9xywEbWuKNQ3AMoCwJdWGUMyFJmhpPDR0U+ysUq+5WFQqL7i6qWNZjsy0t/3u78r2yLrbNbn/76F/lO0hJe2I5emZ2E+88TrJXXWdbCd6r9bk0oJMzJWZ09q2ZERmVa9K5oMz87opId9PKVh6h1bJdrWU1eRM5WUYIz+N1soTvk/XxlBC+w2hh/kHQX3Y+JdxCYp4PI54PP72ggxzCbhzw81IJpK46rqbtPdPnqCUScuHyVy69po1mtxQP/3QW4Lu63kl3QcAuEoaEvX+70rr6U66usiUasXIzOu0mIqrZXLteM/1ZLJdtXaVJudHpJBFy3pCEJEeFxb1yXL0nye/RvomUsyipq3vz0goek3Z5vq+Jmdb5IoSegXZ7u/T00q997/dBgCo1lw89pOf43KE9d2VBa/YMW3N0FA919Tk5KT2/uTkpNw2NDSE06f1yXEQBJiZmZEyDMMw7Q7rO2Yh4Ikd09asXr0aQ0ND2Llzp3yvWCzi+eefx9jYGABgbGwMhUIBe/bskTJPP/00oijCxo0bL3mfGYZhLgbWd8xC0LamWEPU/8KW5XO1SLa6Ai+qLXJKoGlPL0VXDaX0yKb33LpWttffQebX2dO6CTgeUDTZVSMUMRYZekTr0ABFV6lRXJWCbsLwAtrmV/WvIQSZAl4/cVy2X9n7oiZ3x+20z94hiugtzutPc44SXNa3ikwuUUs0WeiRyTVQTNZzUwVNzp2v79D1FyYTe6lUwuHDh+XrI0eO4OWXX0ZPTw9WrFiB+++/H3/zN3+Da665BqtXr8Zf/dVfYXh4GP
feey8AYP369fjABz6Az372s3jwwQfh+z62bt2Kj3/84+eMEGOYduPd71qLdDqNd71bN8VWryeTazpHPhW61gGEobioKCbGnrS+giOUW169+6NI32OgRoAqOth1ddeTNVevkO1kjHRLtaxXAhKmouMMXd8JpVJEJKgdGrqrSKSEz3pV6kcY6WZk01bddegs56d1V5Y3j5Cf2Z3vfbdsV3w9E0OqYdo1xMJU2mF9xyw2bTuxY64cXnzxRfyu4t+4bds2AMCnPvUpPPzww/jzP/9zlMtlfO5zn0OhUMB73/tePPnkk0gkyA/on//5n7F161a8//3vh2mauO+++/Dtb3/7kp8LwzDMW8H6jllseGLHLDl33XUXhDh/IIZhGPj617+Or3/96+eV6enpwSOPPLIY3WMYhlkwWN8xiw372DEMwzAMw3QIbbtiFwUhIstE1dV9P2JKqhHbJl8Sy9R92K4eopQhiSTNX1et1BMz3vReWhJftu5G2X5590Oa3IpR2t/Qu26g/vTraQfsVE62KzXy06sWdb+NyZPk3zE7eVzbFiopCZIZWn7v69MrRRw7+ZJsDy5bLttBpSVFTJUyjhvlWTqO0P1lVF+XZJyOFRvSj1uM131Nat7C+JwwzJVOIp1GMp1GV0JPSZFOKSrappROUcuCj6H62CntqKWqTeRHyjbaidHibxsoXnxqViRh6HJdeUrBEoT0mTDS008hop0I6L65pnqAkNqhresdAeWkAyU1U6TvL64c2wmpv+ma3icxSfpv6g2KQh1ZN6LJnTEb+tS8fNM7MVcWvGLHMAzDMAzTIfDEjmEYhmEYpkNoW1OsY9lwLBuzLdUWwhot1SdTSuHrlmXyASXFybFTBdle854PaHIjN6ivydzqz5c1uVyGTKz9a2+W7bKtF8/e99KvZNut0j6KLcW4z5wYp76Huhk5kaCvZflqMrHeuFavXhFYFObvWHlqx1oyrNeo2kTlTcpkHwUtFSWUaX7JIrNFqldPJzA4XE+tUq0tTLoThrnS6cp2I9PVBWHp5seKknZIKEXc3ZYKOuUS6RpPqSDjurouCAIyl/pKGhO16gwAVCqkdytlciMJWtKiZHpIL2ZyednOZ/o0uUQsJtthS/UKGEoVCaWaUEZxQwGA6dP0uZpSkShSKvUAgAE6VhTSNctmdDP3yhVUtqtaoesnIj0lVi5T13+O1WJeZpg2hVfsGIZhGIZhOgSe2DEMwzAMw3QIPLFjGIZhGIbpENrWx86ruTCjEKm43kUjoYSym+QLIULdLyLZRXIf/qMPy/Yd97xfk8v2kZ/F5Bv7Zdsy9f0V5qlEztTRg7J9cl73M/uPH/1ItruS5C9Tc/UUJEOD5JuSzeg+bEeOUyoUT+lHz/AqTW7tDbfQi5D8R2YKevqUiuKXOFul/RlCv7a1KvnPlJRUCKJU0+TW5xvyLa4yDMNcHP/6xFNIJBIInV9o78/OUhqO0twZ2W7NvKH63KkF5MOWvCg9/QOy3d1HZQjjlq4LyjMF2T70GunFYknXY6OrV8q25ZC+y2Z6NbnVq6n02MioXuZs9VXkR9wTJ12VSej+hpFSUg2Kv5vfovstpdakpexvcFWL31+WdKYvSI9bMU0MPT3148bjen8Ypl25oBW77du347bbbkMmk8HAwADuvfdeHDx4UJOp1WrYsmULent70dXVhfvuu09TNAzDMJcDrO8YhrkcuaCJ3a5du7BlyxY899xzeOqpp+D7Pu6++26UyxRR9KUvfQk//elP8eijj2LXrl04efIkPvrRjy54xxmGYRYT1ncMw1yOXJAp9sknn9ReP/zwwxgYGMCePXvwvve9D3Nzc/iHf/gHPPLII/i93/s9AMBDDz2E9evX47nnnsPtt9/+Wx8rEl49a3pLVnFDCdcPBIXrG4ZuckjEadn+5lvIZBl39OX0V1+m6g2zJ1+XbdfVzY/zszOyfezwq7JdEklNzgnpc11KpvhsQje39neTKfbU5IS2LVDSEFTmyfRx7Mg4dPZRP0qUkiBh69ciiJP5ZTqg65JM6ukEUhk6l6RNZor5SlHfXyMdQBBxuhOmc7mU+u7nv3getu0gP7JOe1+EdP+/9Mufy/bKEb06Ql8vmT5PHCd90nqPpnrysu2ZpEsnFfcPAHj/hjHZvvnGd8l2pUUvmg79hBwZf1O2D732uib3yl7Ss/lcl7btvo/9oWzf+a61sh0T+rrDyDKqGuQppljD1CvgqBU1fKXKhWm3VKjIk/5LKpU3Ikv3MWn+Ytht67jEMDrvKHhibq7ud9bTU8/ltmfPHvi+j02bNkmZa6+9FitWrMDu3bvPuQ/XdVEsFrU/hmGYdoP1HcMwlwMXPbGLogj3338/7rzzTlx//fUAgImJCcRiMeTzeU12cHAQExMT59hL3Y8ll8vJv9HR0XPKMQzDLBWs7xiGuVy46MXlLVu2YO/evXj22WffUQceeOABbNu2Tb4uFosNZRcBiBAF+rK47VBFiVCpnOBBj4wazFE28n/7yeOy3TO4T5MbUJf3KxT56jh6lvKuNJkwbZPMAOkW0+7QAJlEqvOzsp209P1NT1GEm+/pJoJMgkyinhKF9tpLL2pypw4ckm03oILWcPQM6aHa3xHFJJzWr60ZJzNLQsm+3g3d3Lz+XasBAJWqD+C/wDCdzmLru3s/9r8jmUwhPnCNJl+Zpwnia6/QvbZsSJ8QmoopMZkgXeVFVU1u7fW0/+5l5KJR6dOrN/z+PbQKqbpolFtMsZFiBQ0EmXZrgS53+jS5srx55KS2LZWi/k4cn5bto/te0+RMpYLOGxOnZXvD3bdqcitXDcu2GjFrJlrCXR3Su4ZabcLQ9XHMqJ9XzGkJRWaYNuWiJnZbt27F448/jmeeeQYjiq/H0NAQPM9DoVDQnmInJycxNDR0jj0B8Xgc8Xj8nNsYhmGWGtZ3DMNcTlyQKVYIga1bt+Kxxx7D008/jdWrV2vbb7nlFjiOg507d8r3Dh48iPHxcYyNjbXujmEYpm1hfccwzOXIBa3YbdmyBY888gh+/OMfI5PJSD+SXC6HZDKJXC6Hz3zmM9i2bRt6enqQzWbxxS9+EWNjYxcUIcYwDLPUsL5jGOZy5IImdt/73vcAAHfddZf2/kMPPYRPf/rTAIBvfvObME0T9913H1zXxebNm/Hd7373gjsWRQaiyEDM1v3FEjb5cUAJcxeWnk4k8ihlyJkz5KdSmtKdmpM+RaVFoGP1dOuZ0/PD/bIdhK5snzip70+A/DBMky6vF7RkRzfINy+dSGnblIwusNQXLSldQo98Ak3F2aVYmdXkvDj52WSGqe/lZEGTm4/I565WpsXc3uxVmlxfw4+wXObSE0zncin1XdwxEY+ZOHRgr/Z+cY70i1DTeHj6vVcqUW49wyBdkGipluBXKC3S3BTtb3JcT3fys3/7mWzPziufKc1pcpks+cfluntkO53Vzc3Hj5Nf3UDfcm1bIku+fr/4VzruzGu/0eRCRacfnqAk0MfL85rcNevJjzCXJd2aU1JMAUAyRelOcmm6Tk5C/81Jpern4qm6m
GHamAua2KmK5XwkEgns2LEDO3bsuOhOMQzDLDWs7xiGuRx5R3nsGIZhGIZhmPahbXNpm0YcpmEjEddTbQglrUk6Scvs6Yxe4LniU2h8b4bC3O2WtCjeHC3pRybJVRx92X1wkBynI8UMsu5GPQP8L39OjtSeqMi2Y+jZ0asl2pbNZLVtMSXFuWVQP0o1PYXAkVNkci0U6Lxco6zJ9a+l+fvyvJJKRejh/7NnqE+xmmIqXq6bpauVejqAapUrTzDMQjA/M4mgmsTTP/5X7f1jE8dl2/TJpeI3v2lJbKzol0B1+zB0PfbU40/LdkxJ6XTzu9+jyXmxjGwXXdILb4yf1uSmp/fTZ2p0rJMTRzW5I0dJ7tZ336Jt+z+2UPqXF56jxM7B3LQmV3TJjaSquLy88aJuRv7FnlOynbbJfOvEdBOrpUQnZxRT7MjKVZrcR+77OACgUuF0J8zlAa/YMQzDMAzDdAg8sWMYhmEYhukQ2tYU69gGYraJirL8DgBWgqJfI6WaQ8XXM6xbSpbweIzMj46jR8/GUhQplcvStompSU2uspxMrgOjV8v2idNnNLl33XanbJemKBLsjUN6xYtyqSDbtqX3PZcj06wBMm+cOqFnbB9/U4mKjVPfs4N6lG1/j7I/xZxrzOjXonuWhsPyAYpwG8nr5ubDr9Yj9ao1HwzDvHOGBgaRSqVxzSo9V55Q7n/bpLbV4tphWvSMLiLSfbGEfo/DoUjQ4WGKTr1r82ZNLJNSokkTVJXi1b16pZlDh1+nc1i+SrZrQl8zsBS3mb2HDmjbXj1EFXRSq9bL9smTejWM7jy9HoiRG0mqS3fXmZl4U7anTxyW7akzuk6vhUqUsZJV4FRB/1m84/31bdWqfs0Zpl3hFTuGYRiGYZgOgSd2DMMwDMMwHQJP7BiGYRiGYTqEtvWxG+g1kUqY8Kf1kPdqSH4mZSWrhzD11Bu2kjIkm6V0HTFHz8ReLVPagKSjXA5PvzQv/vKXsn3VOiXr+XG98oSpVMNIKVnfLUvPxJ5Mku9LuaT72FWr9DoIKLVKV1Lfxx3vXivbCSVlSmDpKV1Cn9IVVI+Rj505n9DkBlKU4uDda99F7+cHNbk9p44AAGqefhyGYS6O2TOzqCVd3L7xDu39O37nd2Q7Hqd0HbalP5ObJr2OhOKLBz3Fh++Rnqx6pBemjx/R5GYU/9mZMzOy/YbiUwcAJ0+T/usaGKYNcV23GDHysfMC3W/6qV3PyvbKNTfI9mhPS4UKpZJPSknV4tb0yhNvFMmfuUvRi6HQ9dXEbEm2+/pWyXbF11PEPL3rBQCA73OlHebygFfsGIZhGIZhOgSe2DEMwzAMw3QIbWuKHRmJoSvpIGfoS/qHj5H5YFIpYu2Fupmyq4tOrVyhtCBhVNLkLGVuOzNFZt/5kr5sX/NpH5agdqZLD8mfnCCzxfEymT0joYfKD/aTediI9LQhswWqKBFP03nlcxlNLqaYY1zFxAJbNzeXXZLzSkpFiUif1189OiTbw0PUv2PH9TQB01P178D1ufIEwywEqVQcqWQc00W9usxLv9kj2wMDpGsGB/RKO75POmR2tkAbWqrV2IquWb6aTKej3bpuOXGIqjeUS2Q6HRgc0uRSvXnZthJk9qxU9eMuW7ZCtidOHte2nZkmfbpsmPxrjJZavSVX0ZM26UU/0vVQXHFziStpYbzpKU0OJunCQSVVi+fqJtdmN36L0sEM0xbwih3DMAzDMEyHwBM7hmEYhmGYDqFtTbHZvIOulIPqVEV7v3tAifJKU6TVmUk90qrm0XK6HSMTgdcS2BQp5kQ/pH3MVWc1ubQSkVqrkJmhWtMrT3jK/kKlLYQenVYq0nlls3rm9GyWqmFUqyR3ZlrvU1cXmRwMJSrOCHSbQcym/avBarGWotirrl5Fx1UKXj/zzKua3G8O1QuBB6EePcYwzMURtyPEnQhuraC9/8tf7pRt4ZPeyaZ0neH75DpSU6Lq7ZZn95WrRmX7+tuvk+01K4Y1ucIxMpdOzJKOi7VE5q/pJdPs1BS5udyw7npN7l03rJPtH/y//6Rts0FVJHzFfcXzdHOuCBSTa4LO14rrfVq1+irZPn3sIG0wdX2XVNxc1q+nDAO1iu6uM7psAADgunp/GKZd4RU7hmEYhmGYDoEndsySsn37dtx2223IZDIYGBjAvffei4MHD2oytVoNW7ZsQW9vL7q6unDfffdhclIP6BgfH8eHPvQhpFIpDAwM4Mtf/jKCgPPsMQzTPrC+Yy4FPLFjlpRdu3Zhy5YteO655/DUU0/B933cfffdKCvZp7/0pS/hpz/9KR599FHs2rULJ0+exEc/+lG5PQxDfOhDH4LnefjlL3+J73//+3j44Yfx1a9+dSlOiWEY5pywvmMuBYYQ7RXEXSwWkcvl8NK//HdkUg6mjz6lbZ9TUnQUq+QiWJjW/b2Ks4r7YNgvm+mEHq4fqpUs3IJsz1f0ihcpxU+tK0U+cK7Qj1tRqkj4Lm0zhD6HTscpDL+rS0/pYivpSvyQQvxPTbZUuVAqW+Ty5Edox2K6nJKl/UyZ+jdf1DO2f2DTrbRNUTT/1//9E01usuH2F0UCb87WMDc3h2w2i4VgamoKAwMD2LVrF973vvdhbm4O/f39eOSRR/Cxj30MAHDgwAGsX78eu3fvxu23346f/exn+P3f/32cPHkSg4P1KhkPPvgg/uIv/gJTU1OItVyPc9Ecd8zlw0KOu6WiOe7+n29/D6lkEpMF3Y/r5Bnyb4s8uictX1+diRQ9JizyJbNsfewnFL/kZauXyXYaesqQGSXF0d7j5Nv7y+ee1eSmpyiFyFWryY/utjv0ChppRcf97Kc/1rYJn36ChpS0I6alu4BHIZ1zTKkSZMf09E7r1pGP3dEDL9NxQr3Czwt7XpLtG9+zUbarakkjAMMD9d8Pz/fwg//5A9Z3zJJwIeOOV+yYtmJurp7TqqenBwCwZ88e+L6PTZs2SZlrr70WK1aswO7duwEAu3fvxg033CCVHABs3rwZxWIR+/btw7lwXRfFYlH7YxiGuZSwvmMWA57YMW1DFEW4//77ceedd+L66+tRdRMTE4jFYsjn85rs4OAgJiYmpIyq5Jrbm9vOxfbt25HL5eTf6OjoOeUYhmEWA9Z3zGLRtulOyiUbRuQAVpf2fleaTBVOkpbw0y1Fp3M5Mk2UilWlrTuhlipKupMatTOxXk0u4dByf+BSWhTb1ufGMeWloxTtNgxdLqVUxjBbvoVANTkkaWM2n9LkZmbIlDqvmISzPXrfKwHleHntKJmYD7xyTJMb7KFl3sER5Vimbm7ua1TACKMIb84uXAqALVu2YO/evXj22WffXvgd8sADD2Dbtm3ydbFYZGXHLBnptINUKoZci2NMpp/ScLiK3km0PJPHDDK/iaSS3iilm+WiGqXymJ+nVRsrpZt4BtbkZXtNiszBrx15Xe+gQTrOSZHLx4lT45pYb1/3OdsA4FXJ9Om6VIWiXNZ1i6ukIfFdSgNlJ3S9ODhMrjdvniJ9Pzmu971WomO9vu9l6l9vvyYnuuuracJf
2PROrO+YxaJtJ3bMlcXWrVvx+OOP45lnnsHIyIh8f2hoCJ7noVAoaE+xk5OTGBoakjIvvPCCtr9mFFlTppV4PI54S/4rhmGYSwHrO2YxYVMss6QIIbB161Y89thjePrpp7F69Wpt+y233ALHcbBzJyVqPXjwIMbHxzE2NgYAGBsbwyuvvILTp09LmaeeegrZbBbXXXcdGIZh2gHWd8yloG1X7E4eA1IJwC3oJtZMP5kpE0mKGM3pFlv09NCplcq0bF8o6JUsZqdjSpvetyI9S3mkBA+HoRJB1lKAWp0pGyZFvlq2fqmrIUmKlvRDjlKoO6jM0HGret9DJXq2UKJtnt4lzCim6KOH6SQL03r0l1emDw7l6Mlv/crlmlxzd34Y4ddHZ/BO2LJlCx555BH8+Mc/RiaTkT4iuVwOyWQSuVwOn/nMZ7Bt2zb09PQgm83ii1/8IsbGxnD77bcDAO6++25cd911+OM//mN84xvfwMTEBL7yla9gy5Yt/JTKXBZUSoeBMAFE+rO2Y5Bim5wk0+Frrx7V5BJK1H4sl5ftvgHd7DncR5GQtlKtpjenu2+oRWVqShWegQHdZLt8uEe2Tyn+XYcO7dfkVnk0gVFNygAwP0/nVamQ6bQ4pzv4q6bY0COdZsXTmty+vX2y7bnkhjIwoPulLb+RqmMM9NO2vn591SvR2H9tASpPsL5jLgVtO7Fjrgy+973vAQDuuusu7f2HHnoIn/70pwEA3/zmN2GaJu677z64rovNmzfju9/9rpS1LAuPP/44vvCFL2BsbAzpdBqf+tSn8PWvf/1SnQbDMMzbwvqOuRTwxI5ZUn6bNIqJRAI7duzAjh07ziuzcuVKPPHEEwvZNYZhmAWF9R1zKWAfO4ZhGIZhmA6hbVfsQqcXoROHH7tVe9+NyD/DDCgMP5EzNLl8P/nmdZvkxNZT0UPWCzPkm1I4Q3511bJ+acJASRugVJGIAn1/tSr5YagZwC1b99mbr9HnqiXdd8MR5BeSMTN0LFP3OfF96mM8TU+CCUf3s8jHaH9XIS/bN9yk+6asu/Em2V519dWyveF23bfv+Mm6r4vrBcCvj4JhmHeG8FxEFmC2PGvbPumNrEM6Y89zuzS5iUnShYZy/2/YcIsm994x0qfN5LgA8JtfP6/JlWukkw6NU1qkN44e1eSqFdINQpAOTmT1lCFFpcrN/OwZbVu5SD58qha3LV2n5zKU1mRYCTro7l2myQ0Mk4/c8LtvkO2erK7vYmqFDqWtpnABIPW9WhGIYdoZXrFjGIZhGIbpENpuxa7pg1Cp1VeZqjVP2244FDEaRbQSZ1b0pzu7THIwKdqzXNVX2MpVkquoq2g13Rci0iJX32LFzqX9hcoTrBXqoapVl/Zf83xtmxD02lZWG2ueHj7rqi8N2p8l9CdOV6kr6QXUD6el3mRFudYlJTlo1dX75zb60dxvm5Ubvig64RyuNDrhO2ueQ7VWt0T4Lc/agXIv12pkrQgjXe+oUfuGkqzcD/R7vKZEpLpKxKjr6XrWU3RSoOwjajmuUF6rK3ZRS7aASKlFK1r3cZ7vsfVt9dhqZoKg5Rx9Xzkv5XxrbkumA/PCVuyaUbGdNO6Yy4cL+c4M0Wbf8PHjxzkj9mXGsWPHtCSblyNvvPEG1qxZs9TdYC6AThh3rO8uP3jcMUvBhYy7tpvYRVGEkydPQgiBFStW4NixY8hms2//wQ6mWf6l3a6FEALz8/MYHh6GaV7eVv1CoYDu7m6Mj48jl8u9/QeYBeVCxngnjTvWd2fD+m7xiaIIBw8exHXXXdd21/lKYLH1XduZYk3TxMjICIrFeqBANpvlQdegHa9Fp0yCmjdMLpdru2t8JfHbjvFOGnes785NO16LThp3y5fXE8+343W+UlgsfXd5P3YwDMMwDMMwEp7YMQzDMAzDdAhtO7GLx+P42te+xrXvwNfiUsDXeGm50q//lX7+KnwtLg18nZeOxb72bRc8wTAMwzAMw1wcbbtixzAMwzAMw1wYPLFjGIZhGIbpEHhixzAMwzAM0yHwxI5hGIZhGKZD4IkdwzAMwzBMh9CWE7sdO3Zg1apVSCQS2LhxI1544YWl7tKis337dtx2223IZDIYGBjAvffei4MHD2oytVoNW7ZsQW9vL7q6unDfffdhcnJyiXrcWVyJY26xWagxPT4+jg996ENIpVIYGBjAl7/85bMKv1/OXIljj/Xd0nIljrnFpq30nWgzfvCDH4hYLCb+8R//Uezbt0989rOfFfl8XkxOTi511xaVzZs3i4ceekjs3btXvPzyy+KDH/ygWLFihSiVSlLm85//vBgdHRU7d+4UL774orj99tvFHXfcsYS97gyu1DG32CzEmA6CQFx//fVi06ZN4qWXXhJPPPGE6OvrEw888MBSnNKCc6WOPdZ3S8eVOuYWm3bSd203sduwYYPYsmWLfB2GoRgeHhbbt29fwl5dek6fPi0AiF27dgkhhCgUCsJxHPHoo49Kmf379wsAYvfu3UvVzY6Ax9yl4WLG9BNPPCFM0xQTExNS5nvf+57IZrPCdd1LewKLAI+9OqzvLh085i4NS6nv2soU63ke9uzZg02bNsn3TNPEpk2bsHv37iXs2aVnbm4OANDT0wMA2LNnD3zf167NtddeixUrVlxx12Yh4TF36biYMb17927ccMMNGBwclDKbN29GsVjEvn37LmHvFx4eewTru0sDj7lLx1Lqu7aa2J05cwZhGGonBQCDg4OYmJhYol5deqIowv33348777wT119/PQBgYmICsVgM+Xxek73Srs1Cw2Pu0nCxY3piYuKc301z2+UMj706rO8uHTzmLg1Lre/sd9B3ZpHYsmUL9u7di2effXapu8IwCwKPaeZ88NhgOo2lHtNttWLX19cHy7LOihKZnJzE0NDQEvXq0rJ161Y8/vjj+PnPf46RkRH5/tDQEDzPQ6FQ0OSvpGuzGPCYW3zeyZgeGho653fT3HY5w2OP9d2lhsfc4tMO+q6tJnaxWAy33HILdu7cKd+Logg7d+7E2NjYEvZs8RFCYOvWrXjsscfw9NNPY/Xq1dr2W265BY7jaNfm4MGDGB8f7/hrs5hcyWNusVmIMT02NoZXXnkFp0+fljJPPfUUstksrrvuuktzIovElTz2WN8tDVfymFts2krfLUDwx4Lygx/8QMTjcfHwww+LV199VXzuc58T+XxeixLpRL7whS+IXC4n/uM//kOcOnVK/lUqFSnz+c9/XqxYsUI8/fTT4sUXXxRjY2NibGxsCXvdGVypY26xWYgx3Qz/v/vuu8XLL78snnzySdHf399R6U6uxLHH+m7puFLH3GLTTvqu7SZ2Qgjxne98R6xYsULEYjGxYcMG8dxzzy11lxYdAOf8e+ihh6RMtVoVf/qnfyq6u7tFKpUSf/iHfyhOnTq1dJ3uIK7EMbfYLNSYPnr0qLjnnntEMpkUfX194s/+7M+E7/uX+GwWjytx7LG+W1quxDG32LSTvjMaHWIYhmEYhmEuc9rKx45hGIZhGIa5eHhixzAMwzAM0yHwxI5hGIZ
hGKZD4IkdwzAMwzBMh8ATO4ZhGIZhmA6BJ3YMwzAMwzAdAk/sGIZhGIZhOgSe2DEMwzAMw3QIPLFjGIZhGIbpEHhixzAMwzAM0yHwxI5hGIZhGKZD+P8BVwmQrFY6sosAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# We can also support different types of ablations. For example, we can use block or column ablations.\n", + "\n", + "(x_train, y_train), (x_test, y_test) = get_cifar_data()\n", + "for ablation_type in ['block', 'row']:\n", + " art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=torch.optim.SGD, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " verbose=False,\n", + " ablation_type=ablation_type,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT\n", + " \n", + " # We can see behind the scenes how PyTorchDeRandomizedSmoothing processes input by passing in the first few CIFAR\n", + " # images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", + " original_image = np.moveaxis(x_train, [1], [3])\n", + "\n", + " ablated = art_model.ablator.forward(torch.from_numpy(x_train[0:10]).to(device), column_pos=6)\n", + " ablated = ablated.cpu().detach().numpy()\n", + "\n", + " # Note the shape:\n", + " # - The ablator adds an extra channel to signify the ablated regions of the input.\n", + " # - The input is reshaped to be 224 x 224 to match the image shape that the ViT is expecting\n", + " print(f\"The shape of the ablated image is {ablated.shape}\")\n", + "\n", + " ablated_image = ablated[:, 0:3, :, :]\n", + " \n", + " # shift the axis to disply\n", + " ablated_image = np.moveaxis(ablated_image, [1], [3])\n", + "\n", + " # plot the figure: Note the axis scale!\n", + " f, axarr = plt.subplots(1,4)\n", + " axarr[0].imshow(original_image[0])\n", + " axarr[1].imshow(ablated_image[0])\n", + " axarr[2].imshow(original_image[1])\n", + " axarr[3].imshow(ablated_image[1])\n", + " plt.tight_layout()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6ddf5329", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: levine2020\n", + "INFO:art.estimators.classification.pytorch:Inferred 6 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:MNISTModel(\n", + " (conv_1): Conv2d(2, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (conv_2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (fc1): Linear(in_features=6272, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=100, bias=True)\n", + " (fc3): Linear(in_features=100, out_features=10, bias=True)\n", + " (relu): ReLU()\n", + ")\n", + "Normal Acc 0.965 Cert Acc 0.494: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [00:02<00:00, 33.61it/s]\n" + ] + } + ], + "source": [ + "# The algorithm is general such that we do not have to supply only ViTs. 
\n", + "# We can use regular CNNs as well, howevever we will loose the advantages \n", + "# that were discussed at the start of the notebook. Here we will demonstrate it for a simple MNIST case \n", + "# and also illustrate the use of the algorithm in https://arxiv.org/pdf/2002.10733.pdf\n", + "\n", + "class MNISTModel(torch.nn.Module):\n", + "\n", + " def __init__(self):\n", + " super(MNISTModel, self).__init__()\n", + "\n", + " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", + "\n", + " self.conv_1 = torch.nn.Conv2d(in_channels=2, # input channels are doubled as per https://arxiv.org/pdf/2002.10733.pdf\n", + " out_channels=64,\n", + " kernel_size=4,\n", + " stride=2,\n", + " padding=1)\n", + "\n", + " self.conv_2 = torch.nn.Conv2d(in_channels=64,\n", + " out_channels=128,\n", + " kernel_size=4,\n", + " stride=2, padding=1)\n", + "\n", + " self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500)\n", + " self.fc2 = torch.nn.Linear(in_features=500, out_features=100)\n", + " self.fc3 = torch.nn.Linear(in_features=100, out_features=10)\n", + "\n", + " self.relu = torch.nn.ReLU()\n", + "\n", + " def forward(self, x: \"torch.Tensor\") -> \"torch.Tensor\":\n", + " \"\"\"\n", + " Computes the forward pass though the neural network\n", + " :param x: input data of shape (batch size, N features)\n", + " :return: model prediction\n", + " \"\"\"\n", + " x = self.relu(self.conv_1(x))\n", + " x = self.relu(self.conv_2(x))\n", + " x = torch.flatten(x, 1)\n", + " x = self.relu(self.fc1(x))\n", + " x = self.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + "\n", + "def get_mnist_data():\n", + " \"\"\"\n", + " Get the MNIST data.\n", + " \"\"\"\n", + " train_set = datasets.MNIST('./data', train=True, download=True)\n", + " test_set = datasets.MNIST('./data', train=False, download=True)\n", + "\n", + " x_train = train_set.data.numpy().astype(np.float32)\n", + " y_train = train_set.targets.numpy()\n", + "\n", + " x_test = test_set.data.numpy().astype(np.float32)\n", + " y_test = test_set.targets.numpy()\n", + "\n", + " x_train = np.expand_dims(x_train, axis=1)\n", + " x_test = np.expand_dims(x_test, axis=1)\n", + "\n", + " x_train = x_train / 255.0\n", + " x_test = x_test / 255.0\n", + "\n", + " return (x_train, y_train), (x_test, y_test)\n", + "\n", + "\n", + "model = MNISTModel()\n", + "(x_train, y_train), (x_test, y_test) = get_mnist_data()\n", + "optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)\n", + "\n", + "art_model = PyTorchDeRandomizedSmoothing(model=model,\n", + " loss=torch.nn.CrossEntropyLoss(),\n", + " optimizer=optimizer,\n", + " input_shape=(1, 28, 28),\n", + " nb_classes=10,\n", + " ablation_type='column',\n", + " algorithm='levine2020', # Algorithm selection\n", + " threshold=0.3, # Requires a threshold\n", + " ablation_size=2,\n", + " logits=True)\n", + "\n", + "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[200], gamma=0.1)\n", + "\n", + "# Uncomment to train.\n", + "'''\n", + "art_model.fit(x_train, y_train,\n", + " nb_epochs=400,\n", + " scheduler=scheduler)\n", + "torch.save(art_model.model.state_dict(), 'trained_mnist.pt')\n", + "\n", + "'''\n", + "art_model.model.load_state_dict(torch.load('trained_mnist.pt'))\n", + "acc, cert_acc = art_model.eval_and_certify(x_test, y_test, size_to_certify=5)\n" + ] } ], "metadata": { @@ -984,7 +1212,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.16" + 
"version": "3.10.12" } }, "nbformat": 4, diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index ee3d345139..3c6bffc3bf 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -127,7 +127,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): @pytest.mark.only_with_platform("pytorch") -def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): +def test_ablation_row(art_warning, fix_get_mnist_data, fix_get_cifar10_data): """ Check that the ablation is being performed correctly """ @@ -270,8 +270,8 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 import sys from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - import shutil + # import shutil # if os.path.exists('smoothed-vit'): # shutil.rmtree('smoothed-vit') @@ -330,33 +330,36 @@ def forward(self, ones_mask): ColumnAblator, BlockAblator, ) - from custom_models.vision_transformer import vit_small_patch16_224, vit_base_patch16_224 + from custom_models.vision_transformer import vit_base_patch16_224 cifar_data = fix_get_cifar10_data[0][:50] - cifar_labels = fix_get_cifar10_data[1][:50] ''' - timm config for: + timm config for: def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). - ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + ImageNet-1k weights fine-tuned from in21k @ 224x224, + source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) - model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + model = _create_vision_transformer('vit_base_patch16_224', + pretrained=pretrained, **dict(model_args, **kwargs)) return model - - + + def vit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/16) """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) - model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + model = _create_vision_transformer('vit_small_patch16_224', + pretrained=pretrained, **dict(model_args, **kwargs)) return model - - smooth repo config for: + + smooth repo config for: def vit_small_patch16_224(pretrained=False, **kwargs): if pretrained: - # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model + # NOTE my scale was wrong for original weights, leaving this here + # until I have better ones for this model kwargs.setdefault('qk_scale', 768 ** -0.5) model = VisionTransformer(patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3., **kwargs) model.default_cfg = default_cfgs['vit_small_patch16_224'] @@ -364,16 +367,18 @@ def vit_small_patch16_224(pretrained=False, **kwargs): load_pretrained( model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter) return model - - + + def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). - ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. 
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, + source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) - model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + model = _create_vision_transformer('vit_base_patch16_224', + pretrained=pretrained, **dict(model_args, **kwargs)) return model - + ''' art_model = PyTorchDeRandomizedSmoothing( @@ -404,6 +409,10 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: output_shape=(3, 224, 224), ) ablated = ablator.forward(cifar_data, column_pos=10) + madry_preds = madry_vit(ablated) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) + elif ablation == "block": ablator = BlockAblator( ablation_size=4, @@ -414,10 +423,11 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: mode="ViT", ) ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) + madry_preds = madry_vit(ablated) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) - madry_preds = madry_vit(ablated) - art_preds = art_model.model(ablated) - assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) + sys.path.remove("smoothed-vit/src/utils/") @pytest.mark.only_with_platform("pytorch") @@ -435,6 +445,7 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa from torch.utils.data import Dataset device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + experiment_file_directory = "smooth_vit_tests" class ArgClass: def __init__(self): @@ -443,7 +454,7 @@ def __init__(self): self.certify_stride = 1 self.dataset = "cifar10" self.certify_out_dir = "./" - self.exp_name = "tests" + self.exp_name = experiment_file_directory if ablation == "column": self.certify_mode = "col" if ablation == "block": @@ -465,11 +476,8 @@ def __getitem__(self, idx): import shutil from torch.utils.data import DataLoader - # if os.path.exists('smoothed-vit'): - # shutil.rmtree('smoothed-vit') - - if os.path.exists("tests"): - shutil.rmtree("tests") + if os.path.exists(experiment_file_directory): + shutil.rmtree(experiment_file_directory) os.system("git clone https://github.com/MadryLab/smoothed-vit") sys.path.append("smoothed-vit/src/utils/") @@ -480,7 +488,6 @@ def __getitem__(self, idx): loss=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.SGD, optimizer_params={"lr": 0.01}, - # input_shape=(3, 224, 224), input_shape=(3, 32, 32), nb_classes=10, ablation_type=ablation, @@ -510,6 +517,7 @@ def forward(self, x): x = self.model(x) return x, "filler_arg" + # Replacement function for .cuda() to enable original code to run without gpu. 
def _cuda(self): return self @@ -557,8 +565,8 @@ def __getitem__(self, idx): model = WrappedModel(my_model=art_model.model) certify(args=args, model=model, validation_loader=validation_loader, store=None) - summary = torch.load("tests/m4_s4_summary.pth") - print("the summary is ", summary) + summary = torch.load(experiment_file_directory + "/m4_s4_summary.pth") + acc, cert_acc = art_model.eval_and_certify( x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 ) @@ -570,8 +578,9 @@ def __getitem__(self, idx): cifar_data = upsample(cifar_data) acc_non_ablation = art_model.model(cifar_data) acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) - print("acc non ablation ", acc_non_ablation) + assert np.allclose(acc_non_ablation.astype(float), summary["acc"]) + sys.path.remove("smoothed-vit/src/utils/") @pytest.mark.only_with_platform("pytorch") @@ -661,7 +670,9 @@ def embedder(cls, x, pos_embed, cls_token): self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) From timm dvit: - self.pos_embed = nn.Parameter(torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, + self.patch_embed.num_patches + self.num_prefix_tokens, + self.embed_dim)) From repo: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) From 239e3c0f67d81a19d8bcdb4cf341a9b7233afc6b Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 4 Aug 2023 17:32:16 +0100 Subject: [PATCH 35/55] refactor to reflect correct input dimensionality. Removing general development file Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/pytorch.py | 2 +- .../derandomized_smoothing/tensorflow.py | 2 + dev.py | 191 ------------------ .../test_derandomized_smoothing.py | 18 +- 4 files changed, 12 insertions(+), 201 deletions(-) delete mode 100644 dev.py diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index c38c8c8eae..826f5372a9 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -135,7 +135,7 @@ def __init__( if not channels_first: raise ValueError("Channels must be set to first") - logging.info("Running algorithm: %s" % algorithm) + logging.info("Running algorithm: %s", algorithm) # Default value for output shape output_shape = input_shape diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 504ddefda6..c50513bc6a 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -106,6 +106,8 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ + # input channels are internally doubled for the certification algorithm. 
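+        # (assumption, following the Levine et al. (2020) encoding used by the ablators:
+        #  the image is concatenated with its complement 1 - x before ablation, so ablated
+        #  pixels are zero in both halves and stay distinguishable from genuinely dark ones)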
+ input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2) super().__init__( model=model, nb_classes=nb_classes, diff --git a/dev.py b/dev.py deleted file mode 100644 index b31be5abca..0000000000 --- a/dev.py +++ /dev/null @@ -1,191 +0,0 @@ -import torch -import ssl -# ssl._create_default_https_context = ssl._create_unverified_context -from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing -import numpy as np -from torchvision import datasets -from torchvision import transforms - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -def get_cifar_data(): - """ - Get CIFAR-10 data. - :return: cifar train/test data. - """ - train_set = datasets.CIFAR10('./data', train=True, download=True) - test_set = datasets.CIFAR10('./data', train=False, download=True) - - x_train = train_set.data.astype(np.float32) - y_train = np.asarray(train_set.targets) - - x_test = test_set.data.astype(np.float32) - y_test = np.asarray(test_set.targets) - - x_train = np.moveaxis(x_train, [3], [1]) - x_test = np.moveaxis(x_test, [3], [1]) - - x_train = x_train / 255.0 - x_test = x_test / 255.0 - - return (x_train, y_train), (x_test, y_test) - -def get_mnist_data(): - """ - Get the MNIST data. - """ - train_set = datasets.MNIST('./data', train=True, download=True) - test_set = datasets.MNIST('./data', train=False, download=True) - - x_train = train_set.data.numpy().astype(np.float32) - y_train = train_set.targets.numpy() - - x_test = test_set.data.numpy().astype(np.float32) - y_test = test_set.targets.numpy() - - x_train = np.expand_dims(x_train, axis=1) - x_test = np.expand_dims(x_test, axis=1) - - x_train = x_train / 255.0 - x_test = x_test / 255.0 - - return (x_train, y_train), (x_test, y_test) - -def vit_dev(): - (x_train, y_train), (x_test, y_test) = get_cifar_data() - - art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - replace_last_layer=True, - load_pretrained=True) - # art_model.predict(x_train[0:10]) - - scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1) - art_model.fit(x_train, y_train, - nb_epochs=30, - update_batchnorm=False, - scheduler=scheduler, - transform=transforms.Compose([transforms.RandomHorizontalFlip()])) - - torch.save(art_model.model.state_dict(), 'trained_refactor.pt') - art_model.model.load_state_dict(torch.load('trained_refactor.pt')) - art_model.eval_and_certify(x_test, y_test, size_to_certify=4) - - -def cnn_dev(algo='salman2021'): - - assert algo in ['levine2020', 'salman2021'] - - if algo == 'salman2021': - class MNISTModel(torch.nn.Module): - - def __init__(self): - super(MNISTModel, self).__init__() - - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - self.conv_1 = torch.nn.Conv2d(in_channels=1, - out_channels=64, - kernel_size=4, - stride=2, - padding=1) - - self.conv_2 = torch.nn.Conv2d(in_channels=64, - out_channels=128, - kernel_size=4, - stride=2, padding=1) - - self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) - self.fc2 = torch.nn.Linear(in_features=500, out_features=100) - self.fc3 = torch.nn.Linear(in_features=100, out_features=10) - - self.relu = torch.nn.ReLU() - - def forward(self, x: "torch.Tensor") -> "torch.Tensor": - """ - Computes the forward pass though the neural network - :param x: input data of shape (batch size, N 
features) - :return: model prediction - """ - x = self.relu(self.conv_1(x)) - x = self.relu(self.conv_2(x)) - x = torch.flatten(x, 1) - x = self.relu(self.fc1(x)) - x = self.relu(self.fc2(x)) - x = self.fc3(x) - return x - else: - class MNISTModel(torch.nn.Module): - - def __init__(self): - super(MNISTModel, self).__init__() - - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - self.conv_1 = torch.nn.Conv2d(in_channels=2, - out_channels=64, - kernel_size=4, - stride=2, - padding=1) - - self.conv_2 = torch.nn.Conv2d(in_channels=64, - out_channels=128, - kernel_size=4, - stride=2, padding=1) - - self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500) - self.fc2 = torch.nn.Linear(in_features=500, out_features=100) - self.fc3 = torch.nn.Linear(in_features=100, out_features=10) - - self.relu = torch.nn.ReLU() - - def forward(self, x: "torch.Tensor") -> "torch.Tensor": - x = self.relu(self.conv_1(x)) - x = self.relu(self.conv_2(x)) - x = torch.flatten(x, 1) - x = self.relu(self.fc1(x)) - x = self.relu(self.fc2(x)) - x = self.fc3(x) - return x - - model = MNISTModel() - # (x_train, y_train), (x_test, y_test) = get_cifar_data() - (x_train, y_train), (x_test, y_test) = get_mnist_data() - optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005) - - if algo == 'salman2021': - art_model = PyTorchDeRandomizedSmoothing(model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=optimizer, - input_shape=(1, 28, 28), - nb_classes=10, - ablation_type='column', - ablation_size=2, - algorithm=algo, - logits=True) - else: - art_model = PyTorchDeRandomizedSmoothing(model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=optimizer, - input_shape=(1, 28, 28), - nb_classes=10, - ablation_type='column', - ablation_size=2, - algorithm=algo, - threshold=0.3, - logits=True) - - scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[200], gamma=0.1) - - art_model.fit(x_train, y_train, - nb_epochs=400, - scheduler=scheduler) - -vit_dev() -# cnn_dev() \ No newline at end of file diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index 41d71ddcb4..a7b3bd9705 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -118,10 +118,10 @@ def forward(self, x): for dataset, dataset_name in zip([fix_get_mnist_data, fix_get_cifar10_data], ["mnist", "cifar"]): if dataset_name == "mnist": ptc = SmallMNISTModel().to(device) - input_shape = (2, 28, 28) + input_shape = (1, 28, 28) else: ptc = SmallCIFARModel().to(device) - input_shape = (6, 32, 32) + input_shape = (3, 32, 32) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(ptc.parameters(), lr=0.01, momentum=0.9) @@ -153,7 +153,7 @@ def test_tf2_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import tensorflow as tf def build_model(input_shape): - img_inputs = tf.keras.Input(shape=input_shape) + img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) # tensorflow uses channels last and we are loading weights from an originally trained pytorch model @@ -168,9 +168,9 @@ def build_model(input_shape): for dataset, dataset_name in zip([fix_get_mnist_data, fix_get_cifar10_data], ["mnist", 
"cifar"]): if dataset_name == "mnist": - input_shape = (28, 28, 2) + input_shape = (28, 28, 1) else: - input_shape = (32, 32, 6) + input_shape = (32, 32, 3) net = build_model(input_shape=input_shape) try: @@ -262,7 +262,7 @@ def load_weights(self): clip_values=(0, 1), loss=criterion, optimizer=optimizer, - input_shape=(2, 28, 28), + input_shape=(1, 28, 28), nb_classes=10, ablation_type=ablation_type, ablation_size=ablation_size, @@ -292,7 +292,7 @@ def test_tf2_mnist_certification(art_warning, fix_get_mnist_data): import tensorflow as tf def build_model(input_shape): - img_inputs = tf.keras.Input(shape=input_shape) + img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) # tensorflow uses channels last and we are loading weights from an originally trained pytorch model @@ -324,7 +324,7 @@ def get_weights(): weight_list.append(w) return weight_list - net = build_model(input_shape=(28, 28, 2)) + net = build_model(input_shape=(28, 28, 1)) net.set_weights(get_weights()) loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) @@ -348,7 +348,7 @@ def get_weights(): clip_values=(0, 1), loss_object=loss_object, optimizer=optimizer, - input_shape=(28, 28, 2), + input_shape=(28, 28, 1), nb_classes=10, ablation_type=ablation_type, ablation_size=ablation_size, From 2731bbb17f58546d18b5fc9c0e0461271a709e9a Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sun, 6 Aug 2023 15:29:57 +0100 Subject: [PATCH 36/55] remove logging.basicConfig(level=logging.INFO) from pytorch vit file Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/vision_transformers/pytorch.py | 1 - 1 file changed, 1 deletion(-) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index 6e79a85465..d9675df997 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -32,7 +32,6 @@ if TYPE_CHECKING: from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT -logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) From 7e49baafa3031faa31666cb90e9a1d6533d7c54e Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 14 Aug 2023 10:24:54 +0100 Subject: [PATCH 37/55] Enabling logging to be seen on console. 
Resolve conflict with lingvo in test CI pipeline Signed-off-by: GiulioZizzo --- .github/workflows/ci-pytorch.yml | 5 +++++ .../certification/derandomized_smoothing/pytorch.py | 6 ++++-- .../derandomized_smoothing/vision_transformers/pytorch.py | 4 +++- requirements_test.txt | 3 ++- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index d162dfdcbd..d4e03ebf17 100644 --- a/.github/workflows/ci-pytorch.yml +++ b/.github/workflows/ci-pytorch.yml @@ -34,24 +34,28 @@ jobs: torch: 1.11.0+cpu torchvision: 0.12.0+cpu torchaudio: 0.11.0 + timm: 0.9.2 - name: PyTorch 1.12.1 (Python 3.9) framework: pytorch python: 3.8 torch: 1.12.1+cpu torchvision: 0.13.1+cpu torchaudio: 0.12.1 + timm: 0.9.2 - name: PyTorch 1.13.1 (Python 3.9) framework: pytorch python: 3.9 torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1 + timm: 0.9.2 - name: PyTorch 1.13.1 (Python 3.10) framework: pytorch python: '3.10' torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1 + timm: 0.9.2 name: ${{ matrix.name }} steps: @@ -72,6 +76,7 @@ jobs: pip install torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchvision==${{ matrix.torchvision }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }} -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install torch==${{ matrix.timm }} pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 826f5372a9..d406f8179d 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -132,10 +132,12 @@ def __init__( """ import torch + logging.basicConfig() + logger.setLevel(logging.INFO) if not channels_first: raise ValueError("Channels must be set to first") - logging.info("Running algorithm: %s", algorithm) + logger.info("Running algorithm: %s", algorithm) # Default value for output shape output_shape = input_shape @@ -173,7 +175,7 @@ def __init__( supported_models = self.get_models() if pretrained_cfg["architecture"] not in supported_models: raise ValueError( - "Architecture not supported. Use PyTorchSmoothedViT.get_models() " + "Architecture not supported. Use PyTorchDeRandomizedSmoothing.get_models() " "to get the supported model architectures." 
) model = timm.create_model( diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index d9675df997..17a14cfc76 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -58,6 +58,8 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: """ import timm import torch + logging.basicConfig() + logger.setLevel(logging.INFO) supported_models = [ "vit_base_patch8_224", @@ -116,7 +118,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: for model in models: logger.info("Testing %s creation", model) try: - _ = PyTorchSmoothedViT( + _ = cls( model=model, loss=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.SGD, diff --git a/requirements_test.txt b/requirements_test.txt index b76cb982e9..9e67e12dea 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -35,7 +35,8 @@ torchaudio==0.13.1+cpu torchvision==0.14.1+cpu # PyTorch image transformers -timm==0.9.2 +# Due to conflicts with the old lingvo version we do not install by default. +# timm==0.9.2 catboost==1.1.1 GPy==1.10.0 From 9d8fb4f7efcc4c9e5f7c9f1d06490cbdb410bf2c Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 14 Aug 2023 11:01:50 +0100 Subject: [PATCH 38/55] bugfix in pytorch ci for timm Signed-off-by: GiulioZizzo --- .github/workflows/ci-pytorch.yml | 2 +- art/estimators/certification/derandomized_smoothing/pytorch.py | 1 + .../derandomized_smoothing/vision_transformers/pytorch.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index d4e03ebf17..e9b1a741c9 100644 --- a/.github/workflows/ci-pytorch.yml +++ b/.github/workflows/ci-pytorch.yml @@ -76,7 +76,7 @@ jobs: pip install torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchvision==${{ matrix.torchvision }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }} -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install torch==${{ matrix.timm }} + pip install timm==${{ matrix.timm }} pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index d406f8179d..9613451de1 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -132,6 +132,7 @@ def __init__( """ import torch + logging.basicConfig() logger.setLevel(logging.INFO) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index 17a14cfc76..a8850ebed4 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -58,6 +58,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: """ import timm import torch + logging.basicConfig() logger.setLevel(logging.INFO) From 681f087b4d4e9c3d96538f4863c8dbc15935c1c4 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 25 Aug 2023 16:11:14 +0100 Subject: [PATCH 39/55] updates to tensorflow 
classifier Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/__init__.py | 1 - .../derandomized_smoothing.py | 195 ++++-------------- .../derandomized_smoothing_pytorch.py | 5 +- .../derandomized_smoothing/pytorch.py | 8 + .../derandomized_smoothing/tensorflow.py | 161 +++++++++++++-- .../test_derandomized_smoothing.py | 6 +- 6 files changed, 194 insertions(+), 182 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/__init__.py b/art/estimators/certification/derandomized_smoothing/__init__.py index 1eea6eb3da..69753f4f39 100644 --- a/art/estimators/certification/derandomized_smoothing/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/__init__.py @@ -1,6 +1,5 @@ """ DeRandomized smoothing estimators. """ -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py index 387d300130..dbde0abbce 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py @@ -24,120 +24,12 @@ from __future__ import absolute_import, division, print_function, unicode_literals from abc import ABC, abstractmethod -from typing import Optional, Union, TYPE_CHECKING +from typing import Optional, Union, Tuple import random +import tensorflow as tf import numpy as np -if TYPE_CHECKING: - from art.utils import ABLATOR_TYPE - - -class DeRandomizedSmoothingMixin(ABC): - """ - Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced - in Levine et al. (2020). - - | Paper link: https://arxiv.org/abs/2002.10733 - """ - - def __init__( - self, - ablation_type: str, - ablation_size: int, - threshold: float, - logits: bool, - channels_first: bool, - *args, - **kwargs, - ) -> None: - """ - Create a derandomized smoothing wrapper. - - :param ablation_type: The type of ablations to perform. Currently must be either "column", "row", or "block" - :param ablation_size: Size of the retained image patch. - An int specifying the width of the column for column ablation - Or an int specifying the height/width of a square for block ablation - :param threshold: The minimum threshold to count a prediction. - :param logits: if the model returns logits or normalized probabilities - :param channels_first: If the channels are first or last. - """ - super().__init__(*args, **kwargs) # type: ignore - self.ablation_type = ablation_type - self.logits = logits - self.threshold = threshold - self._channels_first = channels_first - if TYPE_CHECKING: - self.ablator: ABLATOR_TYPE # pylint: disable=used-before-assignment - - if self.ablation_type in {"column", "row"}: - row_ablation_mode = self.ablation_type == "row" - self.ablator = ColumnAblator( - ablation_size=ablation_size, channels_first=self._channels_first, row_ablation_mode=row_ablation_mode - ) - elif self.ablation_type == "block": - self.ablator = BlockAblator(ablation_size=ablation_size, channels_first=self._channels_first) - else: - raise ValueError("Ablation type not supported. 
Must be either column or block") - - def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: - """ - Perform prediction for a batch of inputs. - - :param x: Input samples. - :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. - """ - raise NotImplementedError - - def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: - """ - Performs cumulative predictions over every ablation location - - :param x: Unablated image - :param batch_size: the batch size for the prediction - :param training_mode: if to run the classifier in training mode - :return: cumulative predictions after sweeping over all the ablation configurations. - """ - if self._channels_first: - columns_in_data = x.shape[-1] - rows_in_data = x.shape[-2] - else: - columns_in_data = x.shape[-2] - rows_in_data = x.shape[-3] - - if self.ablation_type in {"column", "row"}: - if self.ablation_type == "column": - ablate_over_range = columns_in_data - else: - # image will be transposed, so loop over the number of rows - ablate_over_range = rows_in_data - - for ablation_start in range(ablate_over_range): - ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) - if ablation_start == 0: - preds = self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - else: - preds += self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - elif self.ablation_type == "block": - for xcorner in range(rows_in_data): - for ycorner in range(columns_in_data): - ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) - if ycorner == 0 and xcorner == 0: - preds = self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - else: - preds += self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - return preds - class BaseAblator(ABC): """ @@ -231,7 +123,9 @@ def __call__( """ return self.forward(x=x, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None) -> np.ndarray: + def certify( + self, preds: tf.Tensor, size_to_certify: int, label: Optional[np.ndarray] = None + ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -241,16 +135,20 @@ def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.nd :param label: Ground truth labels :return: Array of bools indicating if a point is certified against the given patch dimensions. 
""" - indices = np.argsort(-preds, axis=1, kind="stable") - values = np.take_along_axis(np.copy(preds), indices, axis=1) + result = tf.math.top_k(preds, k=2) - num_affected_classifications = size_to_certify + self.ablation_size - 1 + top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] + top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] - margin = values[:, 0] - values[:, 1] + certs = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - certs = margin > 2 * num_affected_classifications - tie_break_certs = (margin == 2 * num_affected_classifications) & (indices[:, 0] < indices[:, 1]) - return np.logical_or(certs, tie_break_certs) + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) + ) & (top_predicted_class < second_predicted_class) + cert = tf.math.logical_or(certs, tie_break_certs) + cert_and_correct = cert & (label == top_predicted_class) + + return cert, cert_and_correct, top_predicted_class def ablate(self, x: np.ndarray, column_pos: int, row_pos=None) -> np.ndarray: """ @@ -350,7 +248,9 @@ def __call__( """ return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None) -> np.ndarray: + def certify( + self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None + ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -360,15 +260,19 @@ def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.nd :param label: Ground truth labels :return: Array of bools indicating if a point is certified against the given patch dimensions. """ - indices = np.argsort(-preds, axis=1, kind="stable") - values = np.take_along_axis(np.copy(preds), indices, axis=1) - margin = values[:, 0] - values[:, 1] + result = tf.math.top_k(preds, k=2) + + top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] + top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] - num_affected_classifications = (size_to_certify + self.ablation_size - 1) ** 2 + certs = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) ** 2 + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ** 2 + ) & (top_predicted_class < second_predicted_class) + cert = tf.math.logical_or(certs, tie_break_certs) + cert_and_correct = cert & (label == top_predicted_class) - certs = margin > 2 * num_affected_classifications - tie_break_certs = (margin == 2 * num_affected_classifications) & (indices[:, 0] < indices[:, 1]) - return np.logical_or(certs, tie_break_certs) + return cert, cert_and_correct, top_predicted_class def forward( self, @@ -418,40 +322,17 @@ def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> np.ndarray: :return: Data ablated at all locations aside from the specified block. 
""" k = self.ablation_size - num_of_image_columns = x.shape[3] - num_of_image_rows = x.shape[2] - - if row_pos + k > x.shape[2] and column_pos + k > x.shape[3]: - start_of_ablation = column_pos + k - num_of_image_columns - x[:, :, :, start_of_ablation:column_pos] = 0.0 - - start_of_ablation = row_pos + k - num_of_image_rows - x[:, :, start_of_ablation:row_pos, :] = 0.0 - - # only the row wraps - elif row_pos + k > x.shape[2] and column_pos + k <= x.shape[3]: - x[:, :, :, :column_pos] = 0.0 - x[:, :, :, column_pos + k :] = 0.0 - - start_of_ablation = row_pos + k - num_of_image_rows - x[:, :, start_of_ablation:row_pos, :] = 0.0 - - # only column wraps - elif row_pos + k <= x.shape[2] and column_pos + k > x.shape[3]: - start_of_ablation = column_pos + k - num_of_image_columns - x[:, :, :, start_of_ablation:column_pos] = 0.0 - - x[:, :, :row_pos, :] = 0.0 - x[:, :, row_pos + k :, :] = 0.0 - - # neither wraps - elif row_pos + k <= x.shape[2] and column_pos + k <= x.shape[3]: + # Column ablations + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: x[:, :, :, :column_pos] = 0.0 x[:, :, :, column_pos + k :] = 0.0 + # Row ablations + if row_pos + k > x.shape[-2]: + x[:, :, (row_pos + k) % x.shape[-2] : row_pos, :] = 0.0 + else: x[:, :, :row_pos, :] = 0.0 x[:, :, row_pos + k :, :] = 0.0 - else: - raise ValueError(f"Ablation failed on row: {row_pos} and column: {column_pos} with size {k}") - return x diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index eecc13693b..3a7bf434f6 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -207,13 +207,14 @@ def certify( cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - cert_and_correct = cert & (label == top_predicted_class) - if self.algorithm == "levine2020": tie_break_certs = ( (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ) & (top_predicted_class < second_predicted_class) cert = torch.logical_or(cert, tie_break_certs) + + cert_and_correct = cert & (label == top_predicted_class) + return cert, cert_and_correct, top_predicted_class_argmax diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 9613451de1..638fe4ad3f 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -583,6 +583,14 @@ def _predict_classifier( return outputs def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + """ + Performs cumulative predictions over every ablation location + + :param x: Unablated image + :param batch_size: the batch size for the prediction + :param training_mode: if to run the classifier in training mode + :return: cumulative predictions after sweeping over all the ablation configurations. 
+ """ if self._channels_first: columns_in_data = x.shape[-1] rows_in_data = x.shape[-2] diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index c50513bc6a..c0ed0c8302 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -29,21 +29,19 @@ from tqdm import tqdm from art.estimators.classification.tensorflow import TensorFlowV2Classifier -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.utils import check_and_transform_label_format if TYPE_CHECKING: # pylint: disable=C0412 import tensorflow as tf - - from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE + from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ABLATOR_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor logger = logging.getLogger(__name__) -class TensorFlowV2DeRandomizedSmoothing(DeRandomizedSmoothingMixin, TensorFlowV2Classifier): +class TensorFlowV2DeRandomizedSmoothing(TensorFlowV2Classifier): """ Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced in Levine et al. (2020). @@ -120,12 +118,31 @@ def __init__( preprocessing_defences=preprocessing_defences, postprocessing_defences=postprocessing_defences, preprocessing=preprocessing, - ablation_type=ablation_type, - ablation_size=ablation_size, - threshold=threshold, - logits=logits, ) + self.ablation_type = ablation_type + self.logits = logits + self.threshold = threshold + self._channels_first = channels_first + + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import ( + ColumnAblator, + BlockAblator, + ) + + if TYPE_CHECKING: + self.ablator: ABLATOR_TYPE # pylint: disable=used-before-assignment + + if self.ablation_type in {"column", "row"}: + row_ablation_mode = self.ablation_type == "row" + self.ablator = ColumnAblator( + ablation_size=ablation_size, channels_first=self._channels_first, row_ablation_mode=row_ablation_mode + ) + elif self.ablation_type == "block": + self.ablator = BlockAblator(ablation_size=ablation_size, channels_first=self._channels_first) + else: + raise ValueError("Ablation type not supported. Must be either column or block") + def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: import tensorflow as tf @@ -136,9 +153,6 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo outputs = tf.nn.softmax(outputs) return np.asarray(outputs >= self.threshold).astype(int) - def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epochs: int, **kwargs) -> None: - return TensorFlowV2Classifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None: """ Fit the classifier on the training set `(x, y)`. 
@@ -200,15 +214,124 @@ def train_step(model, images, labels): if scheduler is not None: scheduler(epoch) - def predict( - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: # type: ignore + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ - Perform prediction of the given classifier for a batch of inputs + Performs cumulative predictions over every ablation location - :param x: Input samples. - :param batch_size: Batch size. + :param x: Unablated image + :param batch_size: the batch size for the prediction :param training_mode: if to run the classifier in training mode - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. + :return: cumulative predictions after sweeping over all the ablation configurations. + """ + if self._channels_first: + columns_in_data = x.shape[-1] + rows_in_data = x.shape[-2] + else: + columns_in_data = x.shape[-2] + rows_in_data = x.shape[-3] + + if self.ablation_type in {"column", "row"}: + if self.ablation_type == "column": + ablate_over_range = columns_in_data + else: + # image will be transposed, so loop over the number of rows + ablate_over_range = rows_in_data + + for ablation_start in range(ablate_over_range): + ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) + if ablation_start == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + elif self.ablation_type == "block": + for xcorner in range(rows_in_data): + for ycorner in range(columns_in_data): + ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) + if ycorner == 0 and xcorner == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + return preds + + def eval_and_certify( + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + verbose: bool = True, + ) -> Tuple["tf.Tensor", "tf.Tensor"]: + """ + Evaluates the normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. + :param batch_size: batch size when evaluating. + :param verbose: If to display the progress bar + :return: The accuracy and certified accuracy over the dataset """ - return DeRandomizedSmoothingMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) + import tensorflow as tf + + # self.model.eval() what is the tf equivalent? 
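+        # (Keras has no model-wide eval() switch: training/inference behaviour is chosen per
+        #  call, so the usual equivalent is to pass training=False when calling the model.)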
+ y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + pbar = tqdm(range(num_batch), disable=not verbose) + accuracy = tf.constant(np.array(0.0), dtype=tf.dtypes.int32) + cert_sum = tf.constant(np.array(0.0), dtype=tf.dtypes.int32) + n_samples = 0 + + for m in pbar: + if m == (num_batch - 1): + i_batch = np.copy(x_preprocessed[m * batch_size :]) + o_batch = y_preprocessed[m * batch_size :] + else: + i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) + o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] + + pred_counts = tf.zeros((len(i_batch), self.nb_classes), dtype=tf.dtypes.int32) + if self.ablation_type in {"column", "row"}: + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + # Perform prediction + model_outputs = self.model(ablated_batch) + + if self.logits: + model_outputs = tf.nn.softmax(model_outputs) + model_outputs = model_outputs >= self.threshold + pred_counts += tf.where(model_outputs, 1, 0) + + else: + for column_pos in range(i_batch.shape[-1]): + for row_pos in range(i_batch.shape[-2]): + ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) + model_outputs = self.model(ablated_batch) + + if self.logits: + model_outputs = tf.nn.softmax(model_outputs) + model_outputs = model_outputs >= self.threshold + pred_counts += tf.where(model_outputs, 1, 0) + + _, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=size_to_certify, label=o_batch + ) + cert_sum += tf.math.reduce_sum(tf.where(cert_and_correct, 1, 0)) + accuracy += tf.math.reduce_sum(tf.where(top_predicted_class == o_batch, 1, 0)) + n_samples += len(cert_and_correct) + + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") + + return (accuracy / n_samples), (cert_sum / n_samples) diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index a7b3bd9705..bcae2c4844 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -360,14 +360,14 @@ def get_weights(): x = np.squeeze(x) x = np.expand_dims(x, axis=-1) preds = classifier.predict(x) - num_certified = classifier.ablator.certify( + cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify( preds, label=fix_get_mnist_data[1], size_to_certify=size_to_certify ) if ablation_type == "column": - assert np.sum(num_certified) == 52 + assert np.sum(cert) == 52 else: - assert np.sum(num_certified) == 22 + assert np.sum(cert) == 22 except ARTTestException as e: art_warning(e) From cdc3938589bce97c47d77b298651beeda17fe4e0 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sat, 26 Aug 2023 10:57:39 +0100 Subject: [PATCH 40/55] updating with changes from ART Signed-off-by: GiulioZizzo --- art/estimators/certification/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/art/estimators/certification/__init__.py b/art/estimators/certification/__init__.py index 33a97ad7ad..0fe6df8632 100644 --- a/art/estimators/certification/__init__.py +++ b/art/estimators/certification/__init__.py @@ -6,7 +6,6 @@ from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing from 
art.estimators.certification.randomized_smoothing.tensorflow import TensorFlowV2RandomizedSmoothing from art.estimators.certification.randomized_smoothing.pytorch import PyTorchRandomizedSmoothing -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing From 4d01aeeebc47ab49c87ca0993865353a38c41a3e Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sat, 26 Aug 2023 13:26:26 +0100 Subject: [PATCH 41/55] updates to tf Signed-off-by: GiulioZizzo --- ...y => derandomized_smoothing_tensorflow.py} | 38 ++++++++++++++++--- .../derandomized_smoothing/tensorflow.py | 9 +++-- 2 files changed, 38 insertions(+), 9 deletions(-) rename art/estimators/certification/derandomized_smoothing/{derandomized_smoothing.py => derandomized_smoothing_tensorflow.py} (91%) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py similarity index 91% rename from art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py rename to art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py index dbde0abbce..bf421dc615 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py @@ -26,9 +26,9 @@ from abc import ABC, abstractmethod from typing import Optional, Union, Tuple import random -import tensorflow as tf import numpy as np +import tensorflow as tf class BaseAblator(ABC): @@ -51,7 +51,9 @@ def __call__( raise NotImplementedError @abstractmethod - def certify(self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None): + def certify( + self, preds: "tf.Tensor", size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -124,7 +126,7 @@ def __call__( return self.forward(x=x, column_pos=column_pos) def certify( - self, preds: tf.Tensor, size_to_certify: int, label: Optional[np.ndarray] = None + self, preds: tf.Tensor, size_to_certify: int, label: Union[np.ndarray, tf.Tensor] ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a @@ -146,7 +148,19 @@ def certify( (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ) & (top_predicted_class < second_predicted_class) cert = tf.math.logical_or(certs, tie_break_certs) - cert_and_correct = cert & (label == top_predicted_class) + + # NB, newer versions of pylint do not require the disable. 
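+        # The label may be one-hot encoded (ndim > 1) or a vector of class indices, so the
+        # ground truth is reduced with argmax before comparing it to the certified top class.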
+ if label.ndim > 1: + cert_and_correct = cert & ( + tf.math.argmax(label, axis=1) + == tf.cast( + top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype + ) # pylint: disable=E1120, E1123 + ) + else: + cert_and_correct = cert & ( + label == tf.cast(top_predicted_class, dtype=label.dtype) + ) # pylint: disable=E1120, E1123 return cert, cert_and_correct, top_predicted_class @@ -249,7 +263,7 @@ def __call__( return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) def certify( - self, preds: np.ndarray, size_to_certify: int, label: Optional[np.ndarray] = None + self, preds: np.ndarray, size_to_certify: int, label: Union[np.ndarray, tf.Tensor] ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a @@ -270,7 +284,19 @@ def certify( (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ** 2 ) & (top_predicted_class < second_predicted_class) cert = tf.math.logical_or(certs, tie_break_certs) - cert_and_correct = cert & (label == top_predicted_class) + + # NB, newer versions of pylint do not require the disable. + if label.ndim > 1: + cert_and_correct = cert & ( + tf.math.argmax(label, axis=1) + == tf.cast( + top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype + ) # pylint: disable=E1120, E1123 + ) + else: + cert_and_correct = cert & ( + label == tf.cast(top_predicted_class, dtype=label.dtype) + ) # pylint: disable=E1120, E1123 return cert, cert_and_correct, top_predicted_class diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index c0ed0c8302..b5f32494f1 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -125,7 +125,7 @@ def __init__( self.threshold = threshold self._channels_first = channels_first - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import ( + from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_tensorflow import ( ColumnAblator, BlockAblator, ) @@ -307,7 +307,7 @@ def eval_and_certify( for pos in range(i_batch.shape[-1]): ablated_batch = self.ablator.forward(i_batch, column_pos=pos) # Perform prediction - model_outputs = self.model(ablated_batch) + model_outputs = self.model(ablated_batch, training=False) if self.logits: model_outputs = tf.nn.softmax(model_outputs) @@ -318,13 +318,16 @@ def eval_and_certify( for column_pos in range(i_batch.shape[-1]): for row_pos in range(i_batch.shape[-2]): ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) - model_outputs = self.model(ablated_batch) + model_outputs = self.model(ablated_batch, training=False) if self.logits: model_outputs = tf.nn.softmax(model_outputs) model_outputs = model_outputs >= self.threshold pred_counts += tf.where(model_outputs, 1, 0) + print("o_batch ", o_batch.dtype) + print("pred_counts ", pred_counts.dtype) + _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch ) From d2b3e7e84458989ba6d2b535923056c203ed45eb Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sat, 26 Aug 2023 21:06:38 +0100 Subject: [PATCH 42/55] Changing line targeted by pylint disable due to black reformatting. 
Adding progress bar to tf to match pytorch Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_tensorflow.py | 16 ++++++------- .../derandomized_smoothing/tensorflow.py | 23 +++++++++++++++---- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py index bf421dc615..0b7da9c070 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py @@ -153,14 +153,14 @@ def certify( if label.ndim > 1: cert_and_correct = cert & ( tf.math.argmax(label, axis=1) - == tf.cast( + == tf.cast( # pylint: disable=E1120, E1123 top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype - ) # pylint: disable=E1120, E1123 + ) ) else: cert_and_correct = cert & ( - label == tf.cast(top_predicted_class, dtype=label.dtype) - ) # pylint: disable=E1120, E1123 + label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 + ) return cert, cert_and_correct, top_predicted_class @@ -289,14 +289,14 @@ def certify( if label.ndim > 1: cert_and_correct = cert & ( tf.math.argmax(label, axis=1) - == tf.cast( + == tf.cast( # pylint: disable=E1120, E1123 top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype - ) # pylint: disable=E1120, E1123 + ) ) else: cert_and_correct = cert & ( - label == tf.cast(top_predicted_class, dtype=label.dtype) - ) # pylint: disable=E1120, E1123 + label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 + ) return cert, cert_and_correct, top_predicted_class diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index b5f32494f1..429fd73b11 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -153,7 +153,9 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo outputs = tf.nn.softmax(outputs) return np.asarray(outputs >= self.threshold).astype(int) - def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None: + def fit( + self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = True, **kwargs + ) -> None: """ Fit the classifier on the training set `(x, y)`. @@ -162,6 +164,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. + :param verbose: if to display training progress bars :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. 
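Since the "scheduler" argument is only described in prose, a minimal sketch of a compatible callable may help; fit() simply invokes it as scheduler(epoch) at the end of every epoch, and the helper name, optimizer handling and step sizes below are illustrative assumptions rather than part of this patch:

    import tensorflow as tf

    def make_scheduler(optimizer, initial_lr: float = 0.01, drop_every: int = 10, factor: float = 0.5):
        # Build a callable matching the fit() contract: it only receives the epoch index.
        def scheduler(epoch: int) -> None:
            new_lr = initial_lr * factor ** (epoch // drop_every)
            # Works for tf.keras optimizers whose learning_rate is stored as a variable/hyperparameter.
            tf.keras.backend.set_value(optimizer.learning_rate, new_lr)
        return scheduler

    # Usage (names assumed): classifier.fit(x_train, y_train, nb_epochs=30,
    #                                        scheduler=make_scheduler(optimizer))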
@@ -187,6 +190,7 @@ def train_step(model, images, labels): loss = self.loss_object(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) self.optimizer.apply_gradients(zip(gradients, model.trainable_variables)) + return loss else: train_step = self._train_step @@ -204,12 +208,24 @@ def train_step(model, images, labels): for epoch in tqdm(range(nb_epochs)): num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + ind = np.arange(len(x_preprocessed)) for m in range(num_batch): i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) labels = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]] images = self.ablator.forward(i_batch) - train_step(self.model, images, labels) + loss = train_step(self.model, images, labels) + + epoch_loss.append(loss.numpy()) + epoch_batch_sizes.append(len(i_batch)) + + if verbose: + pbar.set_description(f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} ") if scheduler is not None: scheduler(epoch) @@ -325,9 +341,6 @@ def eval_and_certify( model_outputs = model_outputs >= self.threshold pred_counts += tf.where(model_outputs, 1, 0) - print("o_batch ", o_batch.dtype) - print("pred_counts ", pred_counts.dtype) - _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch ) From 288cc94f515e117e0e6fbff2f4f65bd5bbbd9235 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Sun, 27 Aug 2023 09:37:32 +0000 Subject: [PATCH 43/55] Tf progress bar and certification simplification Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/tensorflow.py | 55 ++++++++----------- 1 file changed, 22 insertions(+), 33 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 429fd73b11..d3c4d46897 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -153,7 +153,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo outputs = tf.nn.softmax(outputs) return np.asarray(outputs >= self.threshold).astype(int) - def fit( + def fit( # pylint: disable=W0221 self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = True, **kwargs ) -> None: """ @@ -190,7 +190,7 @@ def train_step(model, images, labels): loss = self.loss_object(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) self.optimizer.apply_gradients(zip(gradients, model.trainable_variables)) - return loss + return loss, predictions else: train_step = self._train_step @@ -206,26 +206,38 @@ def train_step(model, images, labels): if self._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) - for epoch in tqdm(range(nb_epochs)): + for epoch in tqdm(range(nb_epochs), desc="Epochs"): num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + epoch_acc = [] epoch_loss = [] epoch_batch_sizes = [] pbar = tqdm(range(num_batch), disable=not verbose) ind = np.arange(len(x_preprocessed)) - for m in range(num_batch): + for m in pbar: i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) labels = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]] images = self.ablator.forward(i_batch) - loss = train_step(self.model, images, labels) - 
epoch_loss.append(loss.numpy()) - epoch_batch_sizes.append(len(i_batch)) + if self._train_step is None: + loss, predictions = train_step(self.model, images, labels) + acc = np.sum(np.argmax(predictions.numpy(), axis=1) == np.argmax(labels, axis=1)) / len(labels) + epoch_acc.append(acc) + epoch_loss.append(loss.numpy()) + epoch_batch_sizes.append(len(i_batch)) + else: + train_step(self.model, images, labels) if verbose: - pbar.set_description(f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} ") + if self._train_step is None: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) + else: + pbar.set_description("Batches") if scheduler is not None: scheduler(epoch) @@ -298,7 +310,6 @@ def eval_and_certify( """ import tensorflow as tf - # self.model.eval() what is the tf equivalent? y = check_and_transform_label_format(y, nb_classes=self.nb_classes) # Apply preprocessing @@ -318,36 +329,14 @@ def eval_and_certify( i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] - pred_counts = tf.zeros((len(i_batch), self.nb_classes), dtype=tf.dtypes.int32) - if self.ablation_type in {"column", "row"}: - for pos in range(i_batch.shape[-1]): - ablated_batch = self.ablator.forward(i_batch, column_pos=pos) - # Perform prediction - model_outputs = self.model(ablated_batch, training=False) - - if self.logits: - model_outputs = tf.nn.softmax(model_outputs) - model_outputs = model_outputs >= self.threshold - pred_counts += tf.where(model_outputs, 1, 0) - - else: - for column_pos in range(i_batch.shape[-1]): - for row_pos in range(i_batch.shape[-2]): - ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) - model_outputs = self.model(ablated_batch, training=False) - - if self.logits: - model_outputs = tf.nn.softmax(model_outputs) - model_outputs = model_outputs >= self.threshold - pred_counts += tf.where(model_outputs, 1, 0) + pred_counts = self.predict(i_batch) _, cert_and_correct, top_predicted_class = self.ablator.certify( pred_counts, size_to_certify=size_to_certify, label=o_batch ) cert_sum += tf.math.reduce_sum(tf.where(cert_and_correct, 1, 0)) - accuracy += tf.math.reduce_sum(tf.where(top_predicted_class == o_batch, 1, 0)) + accuracy += tf.math.reduce_sum(tf.where(top_predicted_class == np.argmax(o_batch, axis=-1), 1, 0)) n_samples += len(cert_and_correct) pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") - return (accuracy / n_samples), (cert_sum / n_samples) From 283564d925b1843bdf79677b79acacbfd0a4384c Mon Sep 17 00:00:00 2001 From: GiulioZizzo <41791963+GiulioZizzo@users.noreply.github.com> Date: Mon, 28 Aug 2023 16:43:36 +0100 Subject: [PATCH 44/55] initial review edits Co-authored-by: Beat Buesser <49047826+beat-buesser@users.noreply.github.com> Signed-off-by: GiulioZizzo --- .../derandomized_smoothing_pytorch.py | 6 +++--- .../certification/derandomized_smoothing/pytorch.py | 2 +- .../derandomized_smoothing/vision_transformers/vit.py | 5 +++-- tests/estimators/certification/test_smooth_vit.py | 3 --- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py index 3a7bf434f6..b39792fd69 100644 --- 
a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py @@ -31,7 +31,7 @@ import torch -class UpSampler(torch.nn.Module): +class UpSamplerPyTorch(torch.nn.Module): """ Resizes datasets to the specified size. Usually for upscaling datasets like CIFAR to Imagenet format @@ -57,7 +57,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.upsample(x) -class ColumnAblator(torch.nn.Module): +class ColumnAblatorPyTorch(torch.nn.Module): """ Pure Pytorch implementation of stripe/column ablation. """ @@ -218,7 +218,7 @@ def certify( return cert, cert_and_correct, top_predicted_class_argmax -class BlockAblator(torch.nn.Module): +class BlockAblatorPyTorch(torch.nn.Module): """ Pure Pytorch implementation of block ablation. """ diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 638fe4ad3f..b6ac22f18c 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -16,7 +16,7 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements the two De-randomized smoothing approaches supported by ART for pytorch. +This module implements De-Randomized smoothing approaches PyTorch. (De)Randomized Smoothing for Certifiable Defense against Patch Attacks diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index ae8549a8e7..11f2e461e5 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -112,7 +112,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class PyTorchViT(VisionTransformer): """ - Art class inheriting from VisionTransformer to control the forward pass of the ViT. + Class to control the forward pass of the ViT. 
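# Illustrative sketch (assumption: plain torch.nn.Upsample rather than the
# UpSamplerPyTorch class renamed above): the upscaling described for CIFAR-sized
# inputs fed to an ImageNet-sized ViT amounts to resizing 32x32 images to 224x224.
import torch

upsample = torch.nn.Upsample(scale_factor=224 / 32)  # nearest-neighbour by default
cifar_batch = torch.rand(2, 3, 32, 32)
print(upsample(cifar_batch).shape)  # torch.Size([2, 3, 224, 224])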
""" # Make as a class attribute to avoid being included in the @@ -121,7 +121,8 @@ class PyTorchViT(VisionTransformer): def __init__(self, **kwargs): """ - Create a ArtViT instance + Create a PyTorchViT instance + :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class """ self.to_drop_tokens = kwargs["drop_tokens"] diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 3c6bffc3bf..6a6841e00e 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -271,9 +271,6 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - # import shutil - # if os.path.exists('smoothed-vit'): - # shutil.rmtree('smoothed-vit') os.system("git clone https://github.com/MadryLab/smoothed-vit") sys.path.append("smoothed-vit/src/utils/") From 22abf10b3236883bf065a9c16c8ef1f8ff3f6e72 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 28 Aug 2023 17:56:36 +0100 Subject: [PATCH 45/55] Incorporating review feedback Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/__init__.py | 1 + .../pytorch.py} | 26 ++-- .../tensorflow.py} | 69 +-------- .../derandomized_smoothing/derandomized.py | 66 ++++++++ .../derandomized_smoothing/pytorch.py | 21 +-- .../derandomized_smoothing/tensorflow.py | 3 +- .../vision_transformers/vit.py | 51 ++++--- .../certification/test_smooth_vit.py | 144 +++--------------- 8 files changed, 150 insertions(+), 231 deletions(-) rename art/estimators/certification/derandomized_smoothing/{derandomized_smoothing_pytorch.py => ablators/pytorch.py} (93%) rename art/estimators/certification/derandomized_smoothing/{derandomized_smoothing_tensorflow.py => ablators/tensorflow.py} (83%) create mode 100644 art/estimators/certification/derandomized_smoothing/derandomized.py diff --git a/art/estimators/certification/derandomized_smoothing/__init__.py b/art/estimators/certification/derandomized_smoothing/__init__.py index 69753f4f39..64397cbb5d 100644 --- a/art/estimators/certification/derandomized_smoothing/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/__init__.py @@ -1,5 +1,6 @@ """ DeRandomized smoothing estimators. 
""" +# from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py similarity index 93% rename from art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py rename to art/estimators/certification/derandomized_smoothing/ablators/pytorch.py index b39792fd69..07062084db 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -30,6 +30,8 @@ import numpy as np import torch +from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator + class UpSamplerPyTorch(torch.nn.Module): """ @@ -57,7 +59,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.upsample(x) -class ColumnAblatorPyTorch(torch.nn.Module): +class ColumnAblatorPyTorch(torch.nn.Module, BaseAblator): """ Pure Pytorch implementation of stripe/column ablation. """ @@ -66,7 +68,7 @@ def __init__( self, ablation_size: int, channels_first: bool, - mode, + mode: str, to_reshape: bool, ablation_mode: str = "column", original_shape: Optional[Tuple] = None, @@ -79,6 +81,7 @@ def __init__( :param ablation_size: The size of the column we will retain. :param channels_first: If the input is in channels first format. Currently required to be True. + :param mode: If we are running the algorithm using a CNN or VIT. :param to_reshape: If the input requires reshaping. :param original_shape: Original shape of the input. :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. @@ -106,14 +109,16 @@ def __init__( self.device = torch.device(f"cuda:{cuda_idx}") if original_shape is not None and output_shape is not None: - self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x: torch.Tensor, column_pos: int) -> torch.Tensor: + def ablate(self, x: torch.Tensor, column_pos: int, row_pos=None) -> torch.Tensor: """ Ablates the input column wise :param x: Input data - :param column_pos: The start position of the albation + :param column_pos: location to start the retained column. NB, if row_ablation_mode is true then this will + be used to act on the rows through transposing the image in ColumnAblatorPyTorch.forward + :param row_pos: Unused. :return: The ablated input with 0s where the ablation occurred """ k = self.ablation_size @@ -152,7 +157,9 @@ def forward( x = torch.cat([x, 1.0 - x], dim=1) if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.additional_channels: - raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + raise ValueError( + f"Ablator expected {self.original_shape[0]} input channels. 
Received shape of {x.shape[1]}" + ) if self.ablation_mode == "row": x = torch.transpose(x, 3, 2) @@ -218,7 +225,7 @@ def certify( return cert, cert_and_correct, top_predicted_class_argmax -class BlockAblatorPyTorch(torch.nn.Module): +class BlockAblatorPyTorch(torch.nn.Module, BaseAblator): """ Pure Pytorch implementation of block ablation. """ @@ -227,7 +234,7 @@ def __init__( self, ablation_size: int, channels_first: bool, - mode, + mode: str, to_reshape: bool, original_shape: Optional[Tuple] = None, output_shape: Optional[Tuple] = None, @@ -239,6 +246,7 @@ def __init__( :param ablation_size: The size of the block we will retain. :param channels_first: If the input is in channels first format. Currently required to be True. + :param mode: If we are running the algorithm using a CNN or VIT. :param to_reshape: If the input requires reshaping. :param original_shape: Original shape of the input. :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. @@ -265,7 +273,7 @@ def __init__( self.device = torch.device(f"cuda:{cuda_idx}") if original_shape is not None and output_shape is not None: - self.upsample = UpSampler(input_size=original_shape[1], final_size=output_shape[1]) + self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) def ablate(self, x: torch.Tensor, column_pos: int, row_pos: int) -> torch.Tensor: """ diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py similarity index 83% rename from art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py rename to art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py index 0b7da9c070..e51a0f88c8 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing_tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals -from abc import ABC, abstractmethod +from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator from typing import Optional, Union, Tuple import random @@ -31,65 +31,6 @@ import tensorflow as tf -class BaseAblator(ABC): - """ - Base class defining the methods used for the ablators. - """ - - @abstractmethod - def __call__( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None - ) -> np.ndarray: - """ - Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location - specified by "column_pos" and "row_pos" in the case of block ablation. - - :param x: input image. - :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError - - @abstractmethod - def certify( - self, preds: "tf.Tensor", size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] - ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: - """ - Checks if based on the predictions supplied the classifications over the ablated datapoints result in a - certified prediction against a patch attack of size size_to_certify. - - :param preds: The cumulative predictions of the classifier over the ablation locations. - :param size_to_certify: The size of the patch to check against. 
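# Illustrative sketch of the column ablation performed by ColumnAblatorPyTorch:
# keep a stripe of `ablation_size` columns starting at `column_pos` (wrapping past
# the image edge) and zero everything else. Plain torch, not the ART class itself.
import torch

def ablate_column(x: torch.Tensor, column_pos: int, ablation_size: int) -> torch.Tensor:
    x = x.clone()
    width = x.shape[-1]
    keep = [(column_pos + i) % width for i in range(ablation_size)]  # wrap-around
    mask = torch.zeros(width, dtype=torch.bool)
    mask[keep] = True
    x[..., ~mask] = 0.0  # zero all columns outside the retained stripe
    return x

batch = torch.rand(2, 3, 32, 32)
ablated = ablate_column(batch, column_pos=30, ablation_size=4)  # retains columns 30, 31, 0, 1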
- :param label: ground truth labels - """ - raise NotImplementedError - - @abstractmethod - def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> np.ndarray: - """ - Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location - specified by "column_pos" and "row_pos" in the case of block ablation. - - :param x: input image. - :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError - - @abstractmethod - def forward( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None - ) -> np.ndarray: - """ - Ablate batch of data at locations specified by column_pos and row_pos - - :param x: input image. - :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError - - class ColumnAblator(BaseAblator): """ Implements the functionality for albating the image, and retaining only a column @@ -135,7 +76,9 @@ def certify( :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. :param label: Ground truth labels - :return: Array of bools indicating if a point is certified against the given patch dimensions. + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. """ result = tf.math.top_k(preds, k=2) @@ -272,7 +215,9 @@ def certify( :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. :param label: Ground truth labels - :return: Array of bools indicating if a point is certified against the given patch dimensions. + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. """ result = tf.math.top_k(preds, k=2) diff --git a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py new file mode 100644 index 0000000000..5b055cf20d --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -0,0 +1,66 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements (De)Randomized Smoothing defences from papers: + +| Paper link: https://arxiv.org/abs/2110.07719 +| Paper link: https://arxiv.org/abs/2002.10733 +""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +from abc import ABC +import numpy as np + + +class DeRandomizedSmoothingMixin(ABC): + """ + Mixin class for smoothed estimators. + """ + + def __init__( + self, + *args, + **kwargs, + ) -> None: + """ + Create a derandomized smoothing wrapper. + """ + super().__init__(*args, **kwargs) # type: ignore + + def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: + """ + Perform prediction for a batch of inputs. + + :param x: Input samples. + :param batch_size: Size of batches. + :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :return: Array of predictions of shape `(nb_inputs, nb_classes)`. + """ + raise NotImplementedError + + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + """ + Performs cumulative predictions over every ablation location + + :param x: Unablated image + :param batch_size: the batch size for the prediction + :param training_mode: if to run the classifier in training mode + :return: cumulative predictions after sweeping over all the ablation configurations. + """ + raise NotImplementedError diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index b6ac22f18c..967234bc34 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -42,6 +42,7 @@ from tqdm import tqdm from art.estimators.classification.pytorch import PyTorchClassifier +from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchSmoothedViT from art.utils import check_and_transform_label_format @@ -57,7 +58,7 @@ logger = logging.getLogger(__name__) -class PyTorchDeRandomizedSmoothing(PyTorchSmoothedViT, PyTorchClassifier): +class PyTorchDeRandomizedSmoothing(PyTorchSmoothedViT, DeRandomizedSmoothingMixin, PyTorchClassifier): """ Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. 
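# Illustrative sketch of the "cumulative predictions over every ablation location"
# that the mixin's predict() documents: one thresholded vote per column position,
# accumulated into per-class counts. `model_fn`, the threshold and the shapes are
# stand-ins, not ART's actual implementation.
import numpy as np

def cumulative_votes(x, model_fn, nb_classes, ablation_size, threshold=0.3):
    votes = np.zeros((x.shape[0], nb_classes), dtype=int)
    width = x.shape[-1]
    for pos in range(width):
        keep = [(pos + i) % width for i in range(ablation_size)]
        ablated = np.zeros_like(x)
        ablated[..., keep] = x[..., keep]          # retain only the ablation stripe
        probs = model_fn(ablated)                  # (N, nb_classes) probabilities
        votes += (probs >= threshold).astype(int)  # thresholded vote per location
    return votes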
@@ -271,16 +272,19 @@ def __init__( if verbose: logger.info(self.model) - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ( - ColumnAblator, - BlockAblator, + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, ) if TYPE_CHECKING: - self.ablator: Union[ColumnAblator, BlockAblator] + self.ablator: Union[ColumnAblatorPyTorch, BlockAblatorPyTorch] + + if self.mode is None: + raise ValueError("Model type not recognized.") if ablation_type in {"column", "row"}: - self.ablator = ColumnAblator( + self.ablator = ColumnAblatorPyTorch( ablation_size=ablation_size, channels_first=True, ablation_mode=ablation_type, @@ -292,7 +296,7 @@ def __init__( mode=self.mode, ) elif ablation_type == "block": - self.ablator = BlockAblator( + self.ablator = BlockAblatorPyTorch( ablation_size=ablation_size, channels_first=True, to_reshape=self.to_reshape, @@ -305,9 +309,6 @@ def __init__( else: raise ValueError(f"ablation_type of {ablation_type} not recognized. Must be either column or block") - if self.mode is None: - raise ValueError("Model type not recognized.") - def fit( # pylint: disable=W0221 self, x: np.ndarray, diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index d3c4d46897..89cc2d58fc 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -28,6 +28,7 @@ import numpy as np from tqdm import tqdm +from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.estimators.classification.tensorflow import TensorFlowV2Classifier from art.utils import check_and_transform_label_format @@ -41,7 +42,7 @@ logger = logging.getLogger(__name__) -class TensorFlowV2DeRandomizedSmoothing(TensorFlowV2Classifier): +class TensorFlowV2DeRandomizedSmoothing(TensorFlowV2Classifier, DeRandomizedSmoothingMixin): """ Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced in Levine et al. (2020). diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index 11f2e461e5..9403785541 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -15,6 +15,33 @@ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+ +# PatchEmbed class adapted from the implementation in https://github.com/MadryLab/smoothed-vit +# +# Original License: +# +# MIT License +# +# Copyright (c) 2021 Madry Lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE + """ Implements functionality for running Vision Transformers in ART """ @@ -30,29 +57,7 @@ class PatchEmbed(torch.nn.Module): Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit - Original License: - - MIT License - - Copyright (c) 2021 Madry Lab - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + Original License stated above. 
""" def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 6a6841e00e..e7c431e7e6 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -66,12 +66,12 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import torch device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch try: cifar_data = fix_get_cifar10_data[0] - col_ablator = ColumnAblator( + col_ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=False, # do not upsample initially @@ -98,7 +98,7 @@ def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): assert torch.sum(ablated[:, :, :, :2]) > 0 # check that upsampling works as expected - col_ablator = ColumnAblator( + col_ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=True, @@ -134,12 +134,12 @@ def test_ablation_row(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import torch device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch try: cifar_data = fix_get_cifar10_data[0] - col_ablator = ColumnAblator( + col_ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=False, # do not upsample initially @@ -167,7 +167,7 @@ def test_ablation_row(art_warning, fix_get_mnist_data, fix_get_cifar10_data): assert torch.sum(ablated[:, :, :2, :]) > 0 # check that upsampling works as expected - col_ablator = ColumnAblator( + col_ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=True, @@ -233,11 +233,11 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 """ Check that based on a given set of synthetic class predictions the certification gives the expected results. """ - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ColumnAblator + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch import torch try: - col_ablator = ColumnAblator( + col_ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, mode="ViT", @@ -262,121 +262,20 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): """ Assert implementations matches original with a forward pass through the same model architecture. - Note, there are some differences in architecture between the same model names. + Note, there are some differences in architecture between the same model names in timm vs the original implementation. We use vit_base_patch16_224 which matches. 
""" import torch - import os - import sys from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - - os.system("git clone https://github.com/MadryLab/smoothed-vit") - sys.path.append("smoothed-vit/src/utils/") - - # Original MaskProcessor used ones_mask = torch.cat([torch.cuda.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) - # which is not compatible with non-cuda torch as is found when running tests on github. - # Hence, replace the class with the same code, but having changed to - # ones_mask = torch.cat([torch.IntTensor(1).fill_(0), ones_mask]).unsqueeze(0) - # Original licence: - """ - MIT License - - Copyright (c) 2021 Madry Lab - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - """ - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - class MaskProcessor(torch.nn.Module): - def __init__(self, patch_size=16): - super().__init__() - self.avg_pool = torch.nn.AvgPool2d(patch_size) - - def forward(self, ones_mask): - B = ones_mask.shape[0] - ones_mask = ones_mask[0].unsqueeze(0) # take the first mask - ones_mask = self.avg_pool(ones_mask)[0] - ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 - ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) - ones_mask = ones_mask.expand(B, -1) - return ones_mask - - from custom_models import preprocess - - preprocess.MaskProcessor = MaskProcessor - - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_pytorch import ( - ColumnAblator, - BlockAblator, + from art.estimators.certification.derandomized_smoothing.ablators import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, ) - from custom_models.vision_transformer import vit_base_patch16_224 cifar_data = fix_get_cifar10_data[0][:50] - - ''' - timm config for: - def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: - """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). - ImageNet-1k weights fine-tuned from in21k @ 224x224, - source https://github.com/google-research/vision_transformer. 
- """ - model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) - model = _create_vision_transformer('vit_base_patch16_224', - pretrained=pretrained, **dict(model_args, **kwargs)) - return model - - - def vit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: - """ ViT-Small (ViT-S/16) - """ - model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) - model = _create_vision_transformer('vit_small_patch16_224', - pretrained=pretrained, **dict(model_args, **kwargs)) - return model - - smooth repo config for: - def vit_small_patch16_224(pretrained=False, **kwargs): - if pretrained: - # NOTE my scale was wrong for original weights, leaving this here - # until I have better ones for this model - kwargs.setdefault('qk_scale', 768 ** -0.5) - model = VisionTransformer(patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3., **kwargs) - model.default_cfg = default_cfgs['vit_small_patch16_224'] - if pretrained: - load_pretrained( - model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter) - return model - - - def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: - """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). - ImageNet-1k weights fine-tuned from in21k @ 224x224, - source https://github.com/google-research/vision_transformer. - """ - model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) - model = _create_vision_transformer('vit_base_patch16_224', - pretrained=pretrained, **dict(model_args, **kwargs)) - return model - - ''' + torch.manual_seed(1234) art_model = PyTorchDeRandomizedSmoothing( model="vit_base_patch16_224", @@ -390,14 +289,9 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: replace_last_layer=True, verbose=False, ) - art_sd = art_model.model.state_dict() - madry_vit = vit_base_patch16_224(pretrained=False) - madry_vit.head = torch.nn.Linear(madry_vit.head.in_features, 10) - madry_vit.load_state_dict(art_sd) - madry_vit = madry_vit.to(device) if ablation == "column": - ablator = ColumnAblator( + ablator = ColumnAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=True, @@ -406,12 +300,12 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: output_shape=(3, 224, 224), ) ablated = ablator.forward(cifar_data, column_pos=10) - madry_preds = madry_vit(ablated) + madry_preds = torch.load('smooth_vit_results/madry_preds_column.pt') art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) elif ablation == "block": - ablator = BlockAblator( + ablator = BlockAblatorPyTorch( ablation_size=4, channels_first=True, to_reshape=True, @@ -420,12 +314,10 @@ def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: mode="ViT", ) ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) - madry_preds = madry_vit(ablated) + madry_preds = torch.load('smooth_vit_results/madry_preds_block.pt') art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) - sys.path.remove("smoothed-vit/src/utils/") - @pytest.mark.only_with_platform("pytorch") @pytest.mark.parametrize("ablation", ["block", "column"]) @@ -739,4 +631,4 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor: cifar_labels = fix_get_cifar10_data[1][:50] scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) - art_model.fit(cifar_data, cifar_labels, 
nb_epochs=2, update_batchnorm=True, scheduler=scheduler) + art_model.fit(cifar_data, cifar_labels, nb_epochs=1, update_batchnorm=True, scheduler=scheduler, batch_size=128) From addf590a17f018a385b7c5a715c76cf149b699b7 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 29 Aug 2023 10:25:28 +0100 Subject: [PATCH 46/55] adding expected results and base ablator. Signed-off-by: GiulioZizzo --- .../ablators/__init__.py | 5 ++ .../derandomized_smoothing/ablators/ablate.py | 76 ++++++++++++++++++ .../smooth_vit_results/madry_preds_block.pt | Bin 0 -> 2761 bytes .../smooth_vit_results/madry_preds_column.pt | Bin 0 -> 2764 bytes 4 files changed, 81 insertions(+) create mode 100644 art/estimators/certification/derandomized_smoothing/ablators/__init__.py create mode 100644 art/estimators/certification/derandomized_smoothing/ablators/ablate.py create mode 100644 tests/estimators/certification/smooth_vit_results/madry_preds_block.pt create mode 100644 tests/estimators/certification/smooth_vit_results/madry_preds_column.pt diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py new file mode 100644 index 0000000000..d1f789cc9e --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -0,0 +1,5 @@ +from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, +) +from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ColumnAblator, BlockAblator diff --git a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py new file mode 100644 index 0000000000..5e01b4ae19 --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py @@ -0,0 +1,76 @@ +from __future__ import absolute_import, division, print_function, unicode_literals + +from abc import ABC, abstractmethod +from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING + +import numpy as np +import tensorflow as tf +import torch + +if TYPE_CHECKING: + # pylint: disable=C0412 + import tensorflow as tf + from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ABLATOR_TYPE + from art.defences.preprocessor import Preprocessor + from art.defences.postprocessor import Postprocessor + + +class BaseAblator(ABC): + """ + Base class defining the methods used for the ablators. + """ + + @abstractmethod + def __call__( + self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + ) -> np.ndarray: + """ + Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location + specified by "column_pos" and "row_pos" in the case of block ablation. + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError + + @abstractmethod + def certify( + self, preds: Union["tf.Tensor", torch.Tensor], size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: + """ + Checks if based on the predictions supplied the classifications over the ablated datapoints result in a + certified prediction against a patch attack of size size_to_certify. 
+ + :param preds: The cumulative predictions of the classifier over the ablation locations. + :param size_to_certify: The size of the patch to check against. + :param label: ground truth labels + """ + raise NotImplementedError + + @abstractmethod + def ablate( + self, x: Union[np.ndarray, torch.Tensor], column_pos: int, row_pos: int + ) -> Union[np.ndarray, torch.Tensor]: + """ + Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location + specified by "column_pos" and "row_pos" in the case of block ablation. + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError + + @abstractmethod + def forward( + self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + ) -> np.ndarray: + """ + Ablate batch of data at locations specified by column_pos and row_pos + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError diff --git a/tests/estimators/certification/smooth_vit_results/madry_preds_block.pt b/tests/estimators/certification/smooth_vit_results/madry_preds_block.pt new file mode 100644 index 0000000000000000000000000000000000000000..f1d0fb862e4f310e31292dd48edcd173df868a06 GIT binary patch literal 2761 zcmaKu2~-qU7KW>71iBe;MG;(Z1r-6o5Jl=s_5;PGtN(9sx zY*7eCA;A?Dkf_kNP$4WMivbk`8c`&O%0MJ6qLA(}lSwiqp(r?HrTpVJ zH-%Cy2~VA4Bjro@>gXRDF4gwv;K`)}yxbXgTX*3esgAqQowrA-8|CjG5*!@pFJBw7 zF--W8EFe(5K-abYa-mdj%0j6?E)_16>RXB3b@fbx6$28~cA6o6XRF4{*mtyQZ6k6v z{dc39X5S*c;{^c23aIr(B_#TX!T7Wx7*KVQc)UlnPPHG_-*X0ul{tN1sE5(1cPOI% zD(s$}t?Yh02@Oqtp=U$->5~Bup*gF;@z5L@g~b%YzDorZ;gJPSuMD9nV=^W<{7iP| z`)Gp21sEnZ0q1xZY8v$t##S#T-}ka0YU@d`SJ-1p$xQmJH=Cl4>!YrD9_4;3QXVv3 zfN@_JP^D!xbf>wp-aTtjB7aTkwKKWAR3XMInj8&ZCDFqoEgW3kOIsWtLsZHHR#oeS z2ewv`g(<@wHoONhXPlwz%D2>*k_!WSM^Z_>5qL@T@Svg?hJDDAZNqv>y{~|t|{^66hr4fu8~-L8N`C6tRUJ9Ek7(F+1fRhm2%>XN`I=(%i4ICD zA*RQB!R~^ibS1_C1qmA{XWnBtlP8Dyd*`D{w}Nie>vP4CkuW-EGK5k(m0X;MDQPj( z>?6SRaC>-QGYV%%2)S;fHj-!c!L;SJFtGL}9W8taSGPYTpOZ!K?0^=O+M8iTMhcm^ zHBjT`WUx@ofZ`&35-CPw!rdBjzd0O(o?5ai=Ck<2=UUv2*yohBxdptJdx36G6y>%| z!AzSvY8`wJI$aJz$izqtv()2$oTbNYnb8ex0YfOhI)&n#b&!z_b1an^!3RqWu>;O!vQPMdzJQ zC@1L*i9%1nl#FQgUSCS9{cW&t$VBevGfA*4{3biccNYe|KZY_K%1Kh@jNbjv$!w~f z(yKEQqQ1zbw(S`ZDmu?z8Z5*SGaAXKIg5U=ISDf_m_yW!C(3!M6bRlXpys{n*#lFi zLswuO1RN`+vPaDn=hFxsN2k;9ycjaQoDHVQ`J`Q@izc4l(9!OM-|OVirSB@pLZ89% zbCIx7WYl3$J`uU3#`bXx0QSc zz6O^oDfHplR#0h=2gQCsedP=Kwy~4O_wmuU^(!!TRPS$*A=*`Dk?XcZu&GRROiG=B zOB{5#xjN0XC6tF>aw5pRzMEveR%qaHf{LzH!(5-8$|#)xT(+9eB^y2?S;8RnJt~7C zgU%C9DuPZy9l7xvp*4Lm6uX&Yiq0Kk+VW_bbvC3XPXzg+sU)2?3Hx8y5XAODXxQEy z&tf0U=c~xVVKnEECd8RJwvaLQ1g+u;aNzdG)LgFuOVdcW9b=B7zKb+DzJe~FI}W*m zPhn5z`xLZt6uP?9P`cSwsP#Opv^TTDYkMEks(=WxJzWBeesP72ibPrzWQU5wSIOn* zC@lZ=3e*l-hY{8-q@7~I`REHVpl^V3$&ys+-sXgo2}8Jw*j|vdXu;uvooKxJ1W97E z$=s?EghyJHF)edR0}V!#of^y4z@i!NhK7;Ml(#^`ra)Pk8cRgZll~ty{1+tbgqkA(XsqJeGGs4AB7s!)963(&YLN7NNuK(x=urm%o?HQ%rF{66uS@5(OmP@ JhbI3`?r-wjf93!H literal 0 HcmV?d00001 diff --git a/tests/estimators/certification/smooth_vit_results/madry_preds_column.pt b/tests/estimators/certification/smooth_vit_results/madry_preds_column.pt new file mode 100644 index 0000000000000000000000000000000000000000..ddb01f7cb0c305d331b10140f6347e48e6271c82 GIT binary 
patch literal 2764 zcmaKudt4LO62}*!5TaH@vGP!Zf`nKg%9~_o5GB$Ey-2lHeDDZCL0*C23!${WPyq#e zfGA=GrBrDZw4&@e$W^q~TCgA#Q9;p*RVgA0So%ml_V%{DwR1kRvuAhaw=?_4`F#Bq zydez3=QICj7K|Yi9TFD5HYhG$6_yaBjEzc+j&TeN(S$g}t&Ea;^O(TEz*K#uCN^HV z!XYS86A_gl2#QyQCPqYs1!+_<39<1(tELOoj7YT5P^+Jy%?w$tQtQ1c5y+!rLp1Nc zdMHrqFVxfKNYuQAJZ)6QC8~$a>DAMyht5+lis=f$X0?GrpwQc_HcV4$dx=shH4(8f z34(vchpDs;&WccK1ZuurfZ9l-76hn=Iru9K`4&-ohURPKbngG!QO%eMuN&1-BKJ1e z4f+}@TDcdTZBLNqZw1&=`6i4zm&f)bFS9>(T5V%_QnF8JsU1gwDb zqs~KW?!O`Oc>o^B&7mb}k7U;G&Vd!JE3nXAO6C^&;md0dm@>2+CLQd92RFs|w(UHM zcS||Xu`9{&;KOkG_y`nBT4hfr8>8f#JD_1Rfq9$+!?za_#VtE3ER(>f#s*;O)w1lL z9>99B1zuM=Vp+zAV7YcFv^+XPDlR=FMxDhVon!_@1dBMYnp~<);?IX^&B9+8d zd<~0#E}_lOOrctzl{KuMj7z&rfLzaj?ghCJ63_>mV*0_Xvk~&PT0(VFzLH$*dQ?Z&%QwN^{BRoH_&Esf z`=GPaTCUEY!IUg9UN}7qJ*~!}d-*UNl0F|lUG#t+aeYR%Q9hlWzZ%=*nNWYi1W#_? z0Re*P=ymoPyIt&pgI=K|oL@z(ho9q?Typ{CnL?JeZHJVv=aKP2>mgA!2#q(~G0}7b zHIFVNmewX%wEqkzK5+VRYCgJ4OW}qh2u#+A*wa~V=-Q#8Ucqu%aTTA6?W9;xA%}G{L($*P zhjTRz2lE3QR7ef*hQ5X#T*?reKp@WXT`+cy5$*o=d)ODd47|E-vP@5HWtKIIZFerx zZHYiCd~bm?rVbkVkCNkX2_%zExPFa*>J80pQKCY|O7k8~2 zfiZrjs5t6^qL1Ctb5Mv@UYj80OKUvtHJ{$)=a7!sd*Dt|5cWU2k;RntP+Vf_!&v@_g+ z-iu1otlWdO6Yq0h{>K%MC5iFaK^f*;%7o;yFzl^2tO~;!|5#U0eX8M)nYic`yoZWn3y7!!h1%1j;_{hqA#bxO15SRPKwSKEq6KaBT>#*_6heF1`$r z`H?7)>0@bH7q}gM2_5-Y*?NCI%?(WgX8Sg_jhO_^N9?hBh6`7@FP~Ij8HEno^Qb5; zgL-FgfnUU>aOao`j}1A=4NKYuFK#Z!57Im^;EQGm{Mij9-2>dt6_?oEJH5#5syw#4 zZv|H~+7mq<9)c3N2lqo?26yU=EoL(1bib1aTVtL~co&T@Y|b3GF!KqlX`Ii^GHQpW zemi1w)es*qI!#mIGk9Togw;VqmprMUP75qLe?2sqGortPhE=Ng1TB}yy{-R&8vX|p zP6FK*>kF1I(AOf0&ixg7y6`)b{Sjd-bT9s7|3GMe75d{?W`7cSq}{zn$WiiFnLqYw z{wEos(cjAa_DB2`1_p2amOp`K|9hp5_H_2&*!*hCWNX!QrmfS#BNHjmT^UA4iT;YA O2D*EmcIon8<^Br(VmE^T literal 0 HcmV?d00001 From 8c286474919459e223918d31293d9a7a65d1f2da Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 29 Aug 2023 14:22:35 +0100 Subject: [PATCH 47/55] Update to workflows and tests. Mypy edits. 
Signed-off-by: GiulioZizzo --- .github/workflows/ci-lingvo.yml | 2 +- .github/workflows/ci-pytorch.yml | 5 --- .../ablators/__init__.py | 3 ++ .../derandomized_smoothing/ablators/ablate.py | 42 ++++++++++++------- .../ablators/pytorch.py | 14 ++++++- .../ablators/tensorflow.py | 30 ++++++++----- requirements_test.txt | 3 +- .../certification/test_smooth_vit.py | 14 +++++-- 8 files changed, 75 insertions(+), 38 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index ab7ab24822..ba433c6a7f 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;^timm/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index e9b1a741c9..d162dfdcbd 100644 --- a/.github/workflows/ci-pytorch.yml +++ b/.github/workflows/ci-pytorch.yml @@ -34,28 +34,24 @@ jobs: torch: 1.11.0+cpu torchvision: 0.12.0+cpu torchaudio: 0.11.0 - timm: 0.9.2 - name: PyTorch 1.12.1 (Python 3.9) framework: pytorch python: 3.8 torch: 1.12.1+cpu torchvision: 0.13.1+cpu torchaudio: 0.12.1 - timm: 0.9.2 - name: PyTorch 1.13.1 (Python 3.9) framework: pytorch python: 3.9 torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1 - timm: 0.9.2 - name: PyTorch 1.13.1 (Python 3.10) framework: pytorch python: '3.10' torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1 - timm: 0.9.2 name: ${{ matrix.name }} steps: @@ -76,7 +72,6 @@ jobs: pip install torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchvision==${{ matrix.torchvision }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }} -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install timm==${{ matrix.timm }} pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py index d1f789cc9e..d727d2efb7 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -1,3 +1,6 @@ +""" +This module contains the ablators for the certified smoothing approaches. 
+""" from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( ColumnAblatorPyTorch, BlockAblatorPyTorch, diff --git a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py index 5e01b4ae19..3970b5b862 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py @@ -1,18 +1,34 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements the abstract base class for the ablators. +""" from __future__ import absolute_import, division, print_function, unicode_literals from abc import ABC, abstractmethod -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Optional, Tuple, Union, TYPE_CHECKING import numpy as np -import tensorflow as tf -import torch if TYPE_CHECKING: # pylint: disable=C0412 import tensorflow as tf - from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ABLATOR_TYPE - from art.defences.preprocessor import Preprocessor - from art.defences.postprocessor import Postprocessor + import torch class BaseAblator(ABC): @@ -36,22 +52,20 @@ def __call__( @abstractmethod def certify( - self, preds: Union["tf.Tensor", torch.Tensor], size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] - ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: + self, pred_counts: np.ndarray, size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Union[Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"], Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. - :param preds: The cumulative predictions of the classifier over the ablation locations. + :param pred_counts: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. 
:param label: ground truth labels """ raise NotImplementedError @abstractmethod - def ablate( - self, x: Union[np.ndarray, torch.Tensor], column_pos: int, row_pos: int - ) -> Union[np.ndarray, torch.Tensor]: + def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> Union[np.ndarray, "torch.Tensor"]: """ Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location specified by "column_pos" and "row_pos" in the case of block ablation. @@ -64,8 +78,8 @@ def ablate( @abstractmethod def forward( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None - ) -> np.ndarray: + self, x: np.ndarray, column_pos: Optional[int] = None, row_pos: Optional[int] = None + ) -> Union[np.ndarray, "torch.Tensor"]: """ Ablate batch of data at locations specified by column_pos and row_pos diff --git a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py index 07062084db..356849ddb6 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -111,7 +111,9 @@ def __init__( if original_shape is not None and output_shape is not None: self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x: torch.Tensor, column_pos: int, row_pos=None) -> torch.Tensor: + def ablate( + self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: Optional[int] = None + ) -> torch.Tensor: """ Ablates the input column wise @@ -122,6 +124,10 @@ def ablate(self, x: torch.Tensor, column_pos: int, row_pos=None) -> torch.Tensor :return: The ablated input with 0s where the ablation occurred """ k = self.ablation_size + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + if column_pos + k > x.shape[-1]: x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 else: @@ -275,7 +281,7 @@ def __init__( if original_shape is not None and output_shape is not None: self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x: torch.Tensor, column_pos: int, row_pos: int) -> torch.Tensor: + def ablate(self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: int) -> torch.Tensor: """ Ablates the input block wise @@ -284,6 +290,10 @@ def ablate(self, x: torch.Tensor, column_pos: int, row_pos: int) -> torch.Tensor :param row_pos: The row start position of the albation :return: The ablated input with 0s where the ablation occurred """ + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + k = self.ablation_size # Column ablations if column_pos + k > x.shape[-1]: diff --git a/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py index e51a0f88c8..e4b927358e 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py @@ -23,12 +23,16 @@ from __future__ import absolute_import, division, print_function, unicode_literals -from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator -from typing import Optional, Union, Tuple +from typing import Optional, Union, Tuple, TYPE_CHECKING import random import numpy as np -import tensorflow as tf + +from 
art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator + +if TYPE_CHECKING: + # pylint: disable=C0412 + import tensorflow as tf class ColumnAblator(BaseAblator): @@ -67,8 +71,8 @@ def __call__( return self.forward(x=x, column_pos=column_pos) def certify( - self, preds: tf.Tensor, size_to_certify: int, label: Union[np.ndarray, tf.Tensor] - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + self, pred_counts: "tf.Tensor", size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -80,7 +84,9 @@ def certify( the predictions which were certified and also correct, and the most predicted class across the different ablations on the input. """ - result = tf.math.top_k(preds, k=2) + import tensorflow as tf + + result = tf.math.top_k(pred_counts, k=2) top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] @@ -107,7 +113,7 @@ def certify( return cert, cert_and_correct, top_predicted_class - def ablate(self, x: np.ndarray, column_pos: int, row_pos=None) -> np.ndarray: + def ablate(self, x: np.ndarray, column_pos: int, row_pos: Optional[int] = None) -> np.ndarray: """ Ablates the image only retaining a column starting at "pos" of width "self.ablation_size" @@ -206,20 +212,22 @@ def __call__( return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) def certify( - self, preds: np.ndarray, size_to_certify: int, label: Union[np.ndarray, tf.Tensor] - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + self, pred_counts: Union["tf.Tensor", np.ndarray], size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. - :param preds: The cumulative predictions of the classifier over the ablation locations. + :param pred_counts: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. :param label: Ground truth labels :return: A tuple consisting of: the certified predictions, the predictions which were certified and also correct, and the most predicted class across the different ablations on the input. """ - result = tf.math.top_k(preds, k=2) + import tensorflow as tf + + result = tf.math.top_k(pred_counts, k=2) top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] diff --git a/requirements_test.txt b/requirements_test.txt index 9e67e12dea..b76cb982e9 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -35,8 +35,7 @@ torchaudio==0.13.1+cpu torchvision==0.14.1+cpu # PyTorch image transformers -# Due to conflicts with the old lingvo version we do not install by default. 
-# timm==0.9.2 +timm==0.9.2 catboost==1.1.1 GPy==1.10.0 diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index e7c431e7e6..0fc6d07bd1 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -16,6 +16,7 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import pytest +import os import numpy as np @@ -300,7 +301,11 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 output_shape=(3, 224, 224), ) ablated = ablator.forward(cifar_data, column_pos=10) - madry_preds = torch.load('smooth_vit_results/madry_preds_column.pt') + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), "certification/smooth_vit_results/madry_preds_column.pt" + ) + ) art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) @@ -314,7 +319,11 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 mode="ViT", ) ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) - madry_preds = torch.load('smooth_vit_results/madry_preds_block.pt') + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), "certification/smooth_vit_results/madry_preds_block.pt" + ) + ) art_preds = art_model.model(ablated) assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) @@ -327,7 +336,6 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa way by doing a full end to end prediction and certification test over the data. """ import torch - import os import sys import types From 513d66844b96af1ea1cf28a222488ad7078bbc6b Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 30 Aug 2023 13:25:26 +0000 Subject: [PATCH 48/55] refactor test to remove cloning from github Signed-off-by: GiulioZizzo --- .../derandomized_smoothing/tensorflow.py | 2 +- .../smooth_vit_results/madry_preds_block.pt | Bin .../smooth_vit_results/madry_preds_column.pt | Bin .../smooth_vit_weights/head_bias.npy | Bin 0 -> 168 bytes .../smooth_vit_weights/head_weight.npy | Bin 0 -> 15488 bytes .../certification/test_smooth_vit.py | 145 ++++-------------- 6 files changed, 33 insertions(+), 114 deletions(-) rename tests/estimators/certification/{ => smooth_vit}/smooth_vit_results/madry_preds_block.pt (100%) rename tests/estimators/certification/{ => smooth_vit}/smooth_vit_results/madry_preds_column.pt (100%) create mode 100644 tests/estimators/certification/smooth_vit/smooth_vit_weights/head_bias.npy create mode 100644 tests/estimators/certification/smooth_vit/smooth_vit_weights/head_weight.npy diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 89cc2d58fc..6cc958acb3 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -126,7 +126,7 @@ def __init__( self.threshold = threshold self._channels_first = channels_first - from art.estimators.certification.derandomized_smoothing.derandomized_smoothing_tensorflow import ( + from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ( ColumnAblator, BlockAblator, ) diff --git a/tests/estimators/certification/smooth_vit_results/madry_preds_block.pt 
b/tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_block.pt similarity index 100% rename from tests/estimators/certification/smooth_vit_results/madry_preds_block.pt rename to tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_block.pt diff --git a/tests/estimators/certification/smooth_vit_results/madry_preds_column.pt b/tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_column.pt similarity index 100% rename from tests/estimators/certification/smooth_vit_results/madry_preds_column.pt rename to tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_column.pt diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_bias.npy b/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_bias.npy new file mode 100644 index 0000000000000000000000000000000000000000..340c4215be2b4c0bb25f7ee93a98a82b1dcf532b GIT binary patch literal 168 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= zXCxM+0{I$-20EHL3bhL41Fi!RCVMre2HTdtsoP^Az-JqCahA0stNrF2Vo+ literal 0 HcmV?d00001 diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_weight.npy b/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..2f718d5fbf884d059a198d094d1976d18fd71899 GIT binary patch literal 15488 zcmbVT_di$f`%khG4H_tfD4`_tT$j?&PC`RcX-Gzjc1bd_BQv9gh$4}^&UHzpy(P3H zC0g1`diVAD7ry7WbN_N4=eo!9eva$FT$efX7EATm++&Bqs?`B2HyYSz8^D_J21eQj zYu0ZJ+_=JT+4_yER{w9@euYoK>hAG?wJZErcfXG`9c!d*W;K44k@ha_|KHQw(Jz(< z6abrz+X|z0O`)=93Rv&?gx{P-W+U#G;23Gj@|~1&g-$(CL1zROt|%DPYA(l^hP`Yfp*Ae1|FdP*X8m3XhWRs zaGnOl*|Dv=Jf~DiL&52*@H1u|c6dl(!KWmS(vPB32^U1)B~@H{>Z>@hzzIFB8RE2Y zefWxkFFyXE!5`g!iFOxmgL-%r`tB^GGKWlzb`WrARZrYiHAnI=Y=bbiriK+#$I%M2 z|7^Yt1EItF4?H$4$Na`V_~cAKc-gmwdvEu~mCv71N`4Wz7+xjmXnF4G`^zf&qbFZl zTm;*6`|$W>Irx2L0_>Wi$B{BNye@bPUpr>Z4tw{Z|M*zg8=8P7#g|cX)QgT7#o;!U zUNBMD9`_zku3YHu2p8gauyh{cwGI6+r`VB0Y+6WN+XgT6wZ%Qr+n_c%jM^$Ea8$f4 zM!fA1TW>|7vVJqW6Ts z|3ugswHu8lE{BS(@i=cu62vVUhm{ptyl%9!$m=HI_C?uXb#*x^Ij%!%*YP+lK|vh! 
zMTWdid6LeCGve$(HI$#2$=lBd;hvEFV)1M#lHR?7{;WWrxM3Mb|GGhTSCi@O*14D( zx)Dn+RzXzO6`=%9L-fRK4Cp8pufC7sNaItu{k;qxxtqy-PG5w-&fyZ3m>6-PlLg;s zUyG6bl+eOBn5Nu&Km#0(;@L^=LRN|je(yV*jz2Ud@nI~EHkpFgP60cakhPcd2CDKL z!9mYPQ2)hILd+i@ELQp?L8l`qevGEbmB(Sm_SqPFSBmQ*reQ#J1>Edog8q(kdAfE8 ztE{~&6q(;3uLKj)8Bk=SP(PDDdg$Yr5%Dx{fEOCvn~hq&lTd$p0AkQAs!wTw$DK}8 zV@ zBga|c-0Fkms8t3TZ$65N)+IDx*%)j%V}?_!&+vlHdZ-gJ9TL6{q%B_(*e9P?_Z*5HKbk46&t6=?;ixg`i&(eIo8$U!7y395jY{{S+UYvD zep^r6pLST{W^aNDvcq}td2Lv|>mz+IoXigom_WK!8K`6zkVGvEj7O+(Mr-#!_Z!b^ zy9`Nn+*V8)62fYIOu_KYMAC@tiE0ZvDf`D(!6Uqs^L$D4FUu0!@}fA({}-$}=_jTr z#X!u{)=J^X0o0m#kqeet;VOd!?wW6l6TIsoFEy5S=eY94^|3tSmIQn)AJVeE+N^M5 z4%3pqUM!4trxNj78dy7L%=JLI4*1Ai8HQ_v-8 zTwgbdW0e!(drdD`dyk-HzaNGSsuqmwq%lM9E4oJ2Qm;ZkE`GfTt_j0ZyTIbJC$2n8!)ksoZJPfmp)M(3$5W0V&2peV&;_#d0!pqoH zPSqI5e}uvG4WRd zc1*uT(F&t+dPF!EZSv#MJ5+3|a~4QW?3{#C?pD$)!x1=2c_-Rl8AuC_deRc}WSYCb z5x0NbE!kN#3QBKlvhE>AHap-hMi{8@3zzx4bz?fk_HKZdCDEjGL6JK@?}b0Mm&C6e zkBiqQu#LPnc4dX5%%DabKDJEUe51R5uKS_Ux0!C;o+8RQZWWupo1L)?oBosNk{m^ zFH`i-&Eh{cn`~~(ye^rs#|6LCcf$SbFt~ouk1iw+#a{Y-Ag9KXR8}NVlEeo4f0)SE zgG@2@%~(9u*hnR&_o#XGLVW1Bik%G0aqDCoEX&j4Nd3h$^hhL6AK}JFC3~=MQ6#0s z4-^ZgpF;binXKLz!>1H;sh=+K8p;Qao(m;4U!Bn@u@aiLtD|(x1XQRdsOYhdlr=o@ z&ZAI{575MYE;ISf&eIgPcR!XEeHQ9Vx`bT@b-1TS!f{V}arCoa;)>VaJRx;ElzuIy z=~5jqqILw^UR*;zZ{O#~MXfe@^?mv11|@L%Vak(D$B1d^WBK*`7ThPM!oZ$mQEl=l zPH!n?&zcBcRU^ac12S!fKUNpAFS*j#{YgACAf2)oDnj`?EmS?3!HORmuqGyu=M{BQ z*^7LX`P~V>B*VdQ;yLu2IflH4lnA*Rp(JxV9`+~X!1Hb9RK8%DkUPczFGmWX=bUUE zm+nP#du4#qdwZ-N_8fFI6X<|yAAI<&&PHMO8(O;8j+?|_arL1^yx%Ar2KlUE$C$ZX zbL{|{9_^2E=Z8UjoC5gd5nt*RCSOgc-mxSv8NrGN{@xTxdrE)6oa z8^FIGr*ffx61;d{zmdp3Ti<~EBbXD6fL?GY%ycMhpbJji(3 zPhmsLX6_FcK_je^dF z;qb8S41C&o2h3N#qb|!}P_;WDx&9>?zQ()oTZ8_%$YCtTHFv$<>A_Z!HXT`S^R3p5t8I1pdr$_Y3}21WU&i7X&v&r%VHzcV4P?^|>tXX3 zfxb2^7W?g+3Ox$*K)H3lO;tt#%8JGC`+231S$7RGC7ygjy$}11x8kB}dEojcmogML z;=FwttPxy|M@%e)uTz`_+k%0xSvL)gLK4Yvh(En}ehm(_Jb=pMrz9V8hf&*!Gz?4~ z0`fz`;Ox}??7q;3FMS-sYm63?i(d+NIW>W6r4)QHuBD}(&b-ciH(pu$Mby+%fPX`^ z_{aC5I9qoRX{aw+&P<077WE*w})Xsp{tl5-YhJadqn23qnP*AQ@eYM zfK$#vk0o!!$=l5M#lK@9)(jy>kGtaK#|`4>^LOa$h;&+!QcrOYs>HUmP?!{dx8l|E zL8vl6hX%j(7t`hqV8`!+aLS~oV0^C@c8y6Aro_G%P5*i0lYS>4E@LO3_A`gvJ;4;V z;x$=*GvZe}vYCxn!^%Sf_1;wvmHC~L6EEcHOH35$B^g6i>|7jouoSKotl>oa7@phk zR>)czZ@uI93$BXZEl!NF=V=kHf=$5`vhCd@M(qxP=_k&URj31I^+4NX3>v`WMl#6$O@%@0`e9i}0#+`6z)M#RhM?YhJV?p_{~SKX z>JAb3qI?()Rr8TF|BkSE5u^)-4}U_Knhv+&E)JAG#`{$#W32Tpu*A#4^(&3Ez+nY$ zj7ulz^$6Zp7E$fay*Sn-52h=6;E2QKoPj-2Fw^3vZx?g_eK9o1?t|zRRx2#{`jG-I zH(=qptuVWKB){3(DhyTGOW_-`DX;RdB(ARtuFuYgC5`svwfH@}e6EScCIuq03C%}i z8hH64oD2$}{zHz@jLnVW135EVIr<3CHn+rxD}69*zc!cm3Ba!-w0ZmZ*X;NDGgQ~9 zK+{!4@#YO#SfINQ*BxqvH7+7}+Y&Te?GvVcQo?sZp@P=cNigP&G7Rao#P5>@xL|RY z1~_k~Cfl9R>yRD_7p%~4_5#wKtaWa<-d{RCJp+DSOcfrCU4-Z+%~S4d;#D2}_~Wcg zF|sUWftMK01ooAhIaYcykSi`^1-I1(TkVoy=v3S#O z2v#j8ar~#vBt2+4!tFBndv_hbdOeG$|EQ%~J2%4e2Tde>&H*P^{DJEOYG`GCe;WF} zMsQx{gwb0@Ld~$5Jh@x)xD;v5OY;?QrAi3oe80g4DTX-oM=Bgyy2<8s?FxA8SAeJU z6mV964AyV@53>6Z6-W=nfGVP2PcrDUn=0=;?#}_e$Fl34J@ha%g{8gL3lryD5w(9M zb@P}Iu4*mhzd1kPPhUGuZ<`==){es3V;ezRCX}21+Hq^oIUqURi;GuXg=0k)U{G@a zvZCY2=Y^_h>AeRWBh_L(V*?=dV<=(ewaM-FukZSLy2cH7|K2ny$z3Yv8OqT(tU9LhXB0u$sNPb6{1<` zRk2#(5A~`p;*_HuqS9Mwip;b?#U3rfzk3^D^5jLBeCmcc;P?UYt#1nS-h!|x^)m$y zjR8HMeJq=;1BT{Sq;qr^8f&&vq0@Ry8Wcl;n@(c>lMo&}tUsSxVu0J9`f;AjAS~~1 zLhappPqMlJr~i^;of|;G)8`4d4k_bpRZIN0$C_qU_TZ4fGAj6WM0`~~45c-D(>U4d zkRHF9n^yiuMx9oec;X<;>u*ZW9`E2d&nY~_XgJWYl2-$dBZ2viveqC3^nG^HCveg!Y257^~7iYzo&BZf$xiD?JHHNewkksso5Q>U(DcdPS;>5*}_TN&h z9&Ew8Q{%{SS|2o8@|Lm#R&m#*9++RCjmDPB_`&TBeN);5FVfagQp^vMzGf+Cs1C#> 
zgZ)L#oF1S!JCETB`R`#CJc2o=Vd^YKBEhpYaO00&!K)jE#XSD6pYI5&)XZdxFaSR&2y|-CTSj^eDtVf?Eg10$TugyAO}Xx-~ja)};Es#h~B6()qh*^|G8f(gNB*YuW-R&@zC z`?d+ojwplK%O;3%*vPw+obgkMJjZ|dAgXNrCrrQSgtzE8M2`C_Mh13}d7c7~ua@HW z&AVx5_H{tjp8P+3!pZ!TWO+9>MeNY$?+_0ULe<57zhp2cLLC>R`Uqck%Hz~f1Ad}B zm!adm4YvlEVSrqOE}6ts!;tq+NZ?w%v#{A2X(^;ar95(h1~ z3O5IxxBiws4*%refIkykK(k^D=*dgO=WAM^@%w%;A*MdEp%50tK~N75ndAZy$J znDc2kei^cYni>$!rUg^rfkmvf<_O*Et;?@2O~euUTKw}!DCdlIM86GE{Ln0js>p)3 z=UgSz@e^U_*>VigT>z$AcHoq=1$<>fGQN??q2Ky-!o zMCDh?994*3vkpV)EM2Vacqo3hET*rIj?&fm9dz2R5O%5sV(0yP^ra;NgD+MKBh2Q2 z!ROa7*k2DjeY*hmzZWLlct;C+_M^zF{@wTS9=szrf~nUHG`p<=)dL<2d#!>5i!GJ# zj00)2MIfKnP3A@3id1`L34hN$0@lG1Y?9~%7kirW=U-RZ;^r{H{@yC?yqAcondh)z z2BNHV3H%)ML->>!&%R#3#$m^VGplsDDZ)tDZa5e8ofUXSyB-EUTY_hN?g-nnOX+%( z2Yu@p1J9%ez~aFb5cop{!|ILEVQM1u=}{plP433rR~OLn^hj1{DTPtV5#qIi$FzR# zdqHh#I@~Qx;>?6pkRNRWsZVkwhWUZmuhJCHKi>s)kMqS1?M7%W*$=PXuF*9u1FpYT zM!z0y1P>)gcq&(Y}NWWeK+s_8U* zw2@gA10J(_klr_fSDU-%sWpQJpgoEE};LL}g zkl5yr7pHmQb=Q7auHVgLCa#3v2KU8zcO5Xo?~JhR)FO2J*-9~)jnH7G%wtCvgG{3( z`)ieBOq3(ah1>#tpH1C7Qd**-dWbF^wdS{5!X<%K4lt;ABo{PTP_kb*x7EeL&#k-J zI3*5;?zY6Vd&L-T4@_(W0-tQqe=qxuhErDqVhrs=}v2^c2>N&k1 z;o`-jFiM-co*!GKNc!pzrX)9W#FPX$x21^p&C$jMqq_Ice+}^Y(=$5Mb%PD<#vxol z9`W|0^@shI7#dv;K0Wg!8E0CcN;ga_Y;kAV1%vtIwPCQ#wFWFko(H4X!`R;Z5j}kR z3X~Tzj5O~glPl>MJmVs4H5>~ibFRYer^k8a;SE^O+Ja31+Pp6)4br4aL0;{#johfY z;y2BG_~?6IvS{-bc0b&IZpNKHXg?#AeHQG7)+6d*9U;YGcTmSH|M}`?lOM{Sv&rYRCZr zzj1wtCEfWFjA_>#F{mR8>x^c=13m3323x)NftzkDcAEFa z?CZI_O?x&q`Q4r$cz?uZTO+WqEz+GiY?zLMhj8T+MD+Qmcar!*|j9sQD6i z=gAybzJyDhVysujnqaTA6#VPQu+e^M_o+U!+lP?DHV1iL+QXYXBsDxY-v|vyJ%Hnn z7qD-;2^~!K1MTiLdA_0qp8rs!fG5+iZuw|o*@l<&toux*H)pd5%%7Y2J* z8w;*F6ZmH1Z76ihBE8jB)PIaKznU-b+kX={B497B-nfu{`yymn-Gln3NZ#n(ZtY@y z1~+E%ff?4@g*fNvus-LN7i{f}m0Z zo~^V3Rrfp=Tu0cVCiliw9YSPx@$&Ov1eFRwW^{wKcUS}(eAgBl z(niw}nFToc$YnU=JBy5GPQv%;OZajAa+pzj9iF9jLg~z6_`a%(lm;FY6t6|H(x$<9 zQ?Z2nzsmBVJ)#)YP{@gIxAT-{XBx47JubQ$hMmpUJYs?wwEl7F-luIgQBMZ);AuUv z;l(-H9%#%ST?N9!*M6MfeqH=?VkliXt6pjO5$Jq)6u3;v7gqht#KMigVc9w*TsGZ} zYqCNhWx;BGkg5e8p35=*#yObvAqwB*xx>186W%vzm~hO@f!wT^KTDlJuXA^W(W~+W zzh&bwK&QsWa$NwF6zZY=%MVZ@8P@H6DhNqys$s{1WpqwuAm7UB#pR6y;A^Z5%dQ&- zMLv7kwQK>leb~=&SrcecS|Et>UBb9~tHfFP18gQ8TZ(T^NysM3!eY@u|sV_A{NuKc`w^P=J7ul8=m#;**C>F)JX2mOG{JV9cX$8&>nYnp+t8H;V@SCXo6%2YYPW2=nVbxUyXt zN8TI^hjK%4YniG|Nmx9WzVF5uPoa%FFxZ^1^?`)Egdk*Lg7N$M$E}E<;xAt%6^E4G}B)+<`trS|!pO zpU@hup8UY=9;Le_u%-GL(9?0nAu&bt@th|-$Z!I;9@}_qkvD$bq{h_;4x?P?WU_GC zfujsP!O!`GP|;?I>lAL%QdMaXs`WW<>ppr@SWn4upQzXCzBrgd=<9bCKKkMqW_xO5 zz|;ZO?F$C-OKDAhxqTiTno>wdzis9Jem)mVEh;d$ZX>6eC*tUOP0%bH&8PqS1dR?p z{BDFi4II6IUY!05G6hYrJ4>2fb{e6k-(xXR-rOe5;}njqDdef+VxS}bFD(q+!n;cH zDSv)3=#Oa@N1tsLnp?`mGc)b@g>abC{hoo2Qz)*z6HH1yB2Xpfj~J<;hl91Fd8*K0 zZN9o(+|y#j@~(3zb$S|YPD%pBIvvg}kKvtX)G+r+B@YUI2&xy75j~`6t;Y{Ia7Kqe zUXAAdTZVB3F*E2%d;M$BPp+aPiBhkhD)mnB002;o~MWwnlNvjX{F+@j`If;DXN{ zn$V|4flNe2wz7IdC#CyP+NL*P8tjRRz3z)K*Hrl1lamnPFNcP&-}1meC9ti!SfXNL z&ZS!qU{r+zVlO>hIiisoLw`a-(hxkL{TW1cKa6>&jq6PoLttw!UX(MF+g|lytFH0g zSpy<*Z8gl7kw>@X>il80GFFbB&U22*amQIZmdo0M;Sv#7^m+haZ@h=x9&g3gx@@*s z))!k9kHWNnwlwRR7vI$nL-&~pH1C!t{?}yBK1EeDX^jKQPST*lu2MEzr+|647m=dz zSaNk7gX<3lQ@Pd@SUYe>{dch_M4@AP(FIdBi}PR@Xfql1KHiOJTp25*6pdA{JW zC5)0^Oh9)NJ;|tidE5Q!w}dJwFaB-4ANJYV@tZ^T*3+I0#^$ltt;W5#JJ(emLl?96 zbMc5)LHmpYuQaK{;FY%MqZ$TTFSYQ*r-}T^SqVFGsc3BjytF-J7D9Vo8r9Tp}5^T1g|fSMTJEDbC*8_ z(*)l@uK5*BUotm}&yC`_Ato6fTs#4f#>auVRI7OOWgt1vIS_6@sfO^lQJ-%VLT+dSJTR-t^9Ot8qFBfKqeOsh<4j9k=Dqb^Z}U9s^66e-rZ~P zz7%hmug+E{YN*cc5KmWLh7anS`N?NR*lU{&1B?6O<#YQ*@xXKF_g;g5ai)e7y8o0DAAgQ# z^BYGcVX?YcWiLV*X=9!8U#f*BDCbau#mYRaA`E)|_zPn-ZMpcRG8S?XO_f&?&st5! 
zL1~ltaB3~d>?lLYy3zRlqbhv(Xn_mSE>l|CXHwtcN|o_5Ab*!GzY9sXkvrptiLzhd ztGgFGdetB8+MBGuOO4|4+yor_p>@{SVs-0apRf_I>7Szq4yUw>Q?7|)B|Kg3txFN$fkCvo!ZJR0#_ z2bLeaz|s1H@KRMMN_^Jxy~qIE7Zz+&qB{oHzVE?r`@R&mnOX6q^&5DtoGG+#Jq|)? z8ti|jiPst}krfl6$-WvN?|edOcJuM*k1TP=awi^msuC(D9K`raL;RI<8a6C?Mkn$Q zlfiCx6ek_zagE10JADSLru$>_!yi!p?H##T{D3F%e&lHp$1~S0WhH~X5K(N3RR`5Y z%Z3(7#g8Jc8yA3Aw zqiG{EA?KYh{P)mVun^A(8H4``BleWwohVn-GQ39j^V2{tcP)%x$9PLI7^kG`;DiHv zAi#Zr_;H^i8nw03`igX@mi@0=!*v4PCyC$`6vSqqT4_oBGpMVm#J8^7d0nCzxj%}A zJLfd`=i6LSQ>Ci%MeYOiFFp;HXWrA&WIL9RI10%@VKnK@URW^HnZu)o;euu-E(r^! z#y@$S`23@dcg<<=v6X_YjosSFr3EYG zxoZ~%qvC1&PSH=Wsm#Jne!Il@A`P^&AB>4%#T>ZsIR9Oc{W^SB-5VaAUMAW-c?s z*79fYX30Iu^mr+b?R$jY$yy34bDa69M;dtT9m^flLqLZ!iq%$Zw5gXU zy{rcfjjg5X=cVxe_(Qt(W)It#KBT*{nRL*8FSxEyXOCSgaQn?r$>xXi=xCh#!9H=LMp<)wlc6gx!9-pL9T6;bwhyJ567RfxPV!1HO&66wb zQhA%}CdvGl$8DBtFY0gJHQa z%3y=&)OwtBvfAhg+_1^TOmeM>XC%;A z`h)(s{Nyze zPffCs*4E_7S1!?yzuO_|T!lDzNg?#^zF+Gm+4CIz39$UUFFUsM#>DRf*h{JeC$5u6 z&Y4eIXKX;|n`y23KArR@>cfen19*G+9-c3|6NcKop@nPeKq@_!5?<$klb#}n?byNI zb*0@{SP6HUx_Q~)76RS3uuyG1-ZKV{oU@IO3lGToPb&G!3#f1D%xY`iiWdi&VNAk6 zj!m3F)8f^6#@QB1bk(5cQ;hNPA~jle#{lgfJQg#z)$rGmy*SF{I;alOW}|{ItUm1u z<@R6U!HQItwe9xhPs`(Xdk?HI`ACObo`C)_X}+?t8rI9?Sx-=1i9TqI>aXL0i*@<5 zw1kyEtjFGS=E1YvKzd-@AH%nNqeqL?al*mzqTc62oH4!@_vY;c%f$isv~@0PPMpi> z>$|n!rMhJHMTsZgNF}Y|GN}Kp7khYYpt#a_!M{VBZElA1kiEA>+YbvV#e<-@Yz&N8 zoJ(7lycOcdPJqXmU9?|XqzPT8!GFnd(OO0ZOQW{p``K@V<@X~|p+N#&1JpV4@Cy?E zP2_#rW}LiSjqf+fa@ue8s*;EOZR@t$;mrl#s6H`Y7#OF+v-iz{@Z+Jv=)xj#Hm;+* z)8RHNWX@7Ashzm3UXE8~921A!4Cm^verOdLO*L(@7M`5)ql(*9)EQ`UA+7{XRD z5F8@ZIXizmjWEgKT|uD~Ga#DJ5ADLs_wP{RNFBN$-5Zj1%7v?5mr3J$cW*#b3T=&^ zf&abA=gt$Fm}jlYF={=~S#pVtI<9o<#Mi;@w7gLBXF1PkDulG@_i2HjA2{Xi?)K0B z3YSL}i5>6f@yO{GxL9z)8AWO2DVGO%+BFn?E=RQ1nat|lzM+%mVD7g=6|V|v{9sfZ zSPbhWXe2p_%L^N5;fLAW|HyrDL47=2o|A~1vaWF-n_g8jbFDe~!e-&(t8ZLZdEl1=>rF1} zYDDdn3Ygqaz_{7XDu&2)u+gJAId04lRXtGH|u>P?OPt5%)Uh=BJi;C7PwD_68%+NFT@G02g=Cxw=4(+v&0!c z`ry)5T^dvVgKn?cDQb8O=H2!yZzXcHRAGOuFc#=q+l%_YImyCe;^sExw5G0F7Tr5f674fw#yAC#Q$#^D2P zxzgYoHtlqQpb7WbM0FVUshPg3D*|r;gd~~6n{*aH{8$X zu&)ucBfS}-rq`0^wP~QV+7y#v3O<%nr5{z&=$;vi{S~(3$xZ(%CzUGk^z6^Vvq9nP z6<;qp86UrUi_iT@#5uw;emAZe)-6ni z=ch_4+k+O->VL!8XkZ08nI3}8%d_zPmc`;3^$l!3E|2!>8(~sux8}QRI~KZ*=Nh$U z*kUh>%28L5zl#^ufQI`^uwI>uOrtYaUNBn~$^Xr8s_S6m9CKh05Lh`c2?6-sgB9hu;jP z%5XC%o3)MVekL)-t>GT;vbgExZ!l1B<-hApvBb<1KV8~}hhtT6*qG9q7mldY5(NE| z&+&!AAe-XUhj8-Qew$Mt-@rA$qvAr-b)?08V2Kb3^W&na{Y4$_nEVzVRxLy4j(ynf z+s)I~oCUeiSbSO9i^uFzK-o1}G=AU)OxWtdCA^$ReHp^Cw+?h?!4fHMm$Hz(A(a{CdLx%jW@_J*t3zs*^z)RWVz0H2Yc3$INzTA^g7+!sxYk z>2zBRZz*%cOLNw6UmppntnQD;^QU3Q;Td$WeHIK|6v-_36vQuc<;&^d=ao;Xtp__rrfbG;rd^40H{gj>=CodG?Sht_-|F z)OMSlZbiUEu>kwOET;F>5;R%=McA@PzG|`V8?bZHuUgz|Hrgyo1l!p?QQP=Coj2@+ zu5k=wG)}`7r7*sH@&*l9+yK984%@u=b_*sSNu;5N_i&=i3Ve3Ahh)isJ?zsshfCjk zbED-r%qy3(xin=M6v_y~?VwB6kycl5=>-e??eh%6UDiQs@=>AB%(dJZx&%K5h2!T$ zHF4j!6zYHcFa0(hhZI?ko_;Fmc*7G7>f>?Ke&B5{)8TcjCs_t(LA^~ne%zb^S(3rf zcY6-(Fu6_o_eHeUi=2OfXuxkm#DY{PFFh3UKhIfFXavMo+t6;4a<7xNoM6fTFp&tVQb#-#Z zpOaRJeA` z8~t&f$J)uGq3P^2)Ehq%FZunXIp^>3p0U&5zmIbK{rDM(?lcox)nmH7o^fQ{7J^$3 z>flhTRxsBE_RTv+|1LbC&q0H^b=xUioI9AkhOS|^m}0K1m*?@BB3wR_2%5_`u=}BG zF|O)28LhqqTh@P~X9o|#@$wz?@X>t8?973>`$|=}myN=kFN`@&#|_7YX7Ih=1MooB zGb+mP;jR6o`1C_*z8gNBpG_#BvK!`@a?=3Zk^qP--3g(2b`$Uzgvw1?dSE%047~I}v z!@XzSnPiOw_#>@?W-nqFm8p@-=rs1vL191j-VJ*r>xLvcz` z2z2d8hv-yaoZ!ERzsYaI(1xS<+dde#xY_a6950+FBf!aqDlx)vHAi&ar@IEvg}$Cq z!0K&eGRBNUrMtDrm0D0V{R4GmuSZQS8IJ6e0F5pg>^;MSEseT+2!>cv-TqzJ`t%O> z!%DjGG6x@LSmG($+cqoZWLalq8xD3W;sNLHu!hVDQi5f8y!R#1`luRIFV za%v&`?^7F#zFIurzMP(ECora2fzRgh_DvX|4J8FYZt+=zPfnz 
lmIt{GSOw4AmgAU(o7k%{g)bgb7TWs`$Fz$UuxFVC{vW45N`n9Z literal 0 HcmV?d00001 diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 0fc6d07bd1..3946f414f2 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -21,7 +21,6 @@ import numpy as np from art.utils import load_dataset - from tests.utils import ARTTestException @@ -263,11 +262,13 @@ def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10 def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): """ Assert implementations matches original with a forward pass through the same model architecture. - Note, there are some differences in architecture between the same model names in timm vs the original implementation. + There are some differences in architecture between the same model names in timm vs the original implementation. We use vit_base_patch16_224 which matches. """ import torch + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.ablators import ( @@ -303,11 +304,12 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 ablated = ablator.forward(cifar_data, column_pos=10) madry_preds = torch.load( os.path.join( - os.path.dirname(os.path.dirname(__file__)), "certification/smooth_vit_results/madry_preds_column.pt" + os.path.dirname(os.path.dirname(__file__)), + "certification/smooth_vit/smooth_vit_results/madry_preds_column.pt", ) ) art_preds = art_model.model(ablated) - assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) elif ablation == "block": ablator = BlockAblatorPyTorch( @@ -321,11 +323,12 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) madry_preds = torch.load( os.path.join( - os.path.dirname(os.path.dirname(__file__)), "certification/smooth_vit_results/madry_preds_block.pt" + os.path.dirname(os.path.dirname(__file__)), + "certification/smooth_vit/smooth_vit_results/madry_preds_block.pt", ) ) art_preds = art_model.model(ablated) - assert torch.allclose(madry_preds, art_preds, rtol=1e-04, atol=1e-04) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) @pytest.mark.only_with_platform("pytorch") @@ -336,49 +339,10 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa way by doing a full end to end prediction and certification test over the data. 
""" import torch - import sys - import types - - from torch.utils.data import Dataset - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - experiment_file_directory = "smooth_vit_tests" - - class ArgClass: - def __init__(self): - self.certify_patch_size = 4 - self.certify_ablation_size = 4 - self.certify_stride = 1 - self.dataset = "cifar10" - self.certify_out_dir = "./" - self.exp_name = experiment_file_directory - if ablation == "column": - self.certify_mode = "col" - if ablation == "block": - self.certify_mode = "block" - self.batch_id = None - - class DataSet(Dataset): - def __init__(self, x, y): - self.x = x - self.y = y - - def __len__(self): - return len(self.y) - - def __getitem__(self, idx): - return self.x[idx], self.y[idx] from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - import shutil - from torch.utils.data import DataLoader - if os.path.exists(experiment_file_directory): - shutil.rmtree(experiment_file_directory) - - os.system("git clone https://github.com/MadryLab/smoothed-vit") - sys.path.append("smoothed-vit/src/utils/") - from smoothing import certify + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") art_model = PyTorchDeRandomizedSmoothing( model="vit_small_patch16_224", @@ -394,90 +358,45 @@ def __getitem__(self, idx): verbose=False, ) - # TODO: Look into incorporating this model into the CI runs rather than just local testing. - if os.path.isfile("vit_small_patch16_224_block.pt"): - art_model.model.load_state_dict(torch.load("vit_small_patch16_224_block.pt")) - - class WrappedModel(torch.nn.Module): - """ - Original implementation requires to return a tuple. We add a dummy return to satisfy this. - """ - - def __init__(self, my_model): - super().__init__() - self.model = my_model - self.upsample = torch.nn.Upsample(scale_factor=224 / 32) - - def forward(self, x): - if x.shape[-1] != 224: - x = self.upsample(x) - x = self.model(x) - return x, "filler_arg" - - # Replacement function for .cuda() to enable original code to run without gpu. - def _cuda(self): - return self - - class MyDataloader(Dataset): - """ - Original implementation made use of .cuda() without device checks. Thus, for cpu only machines - (such as those run for ART CI checks) the test will fail. Here we override .cuda() for the - instances to just return self. 
- """ - - def __init__(self, x, y): - self.x = x - self.y = y - self.bsize = 2 - - def __len__(self): - return 2 - - def __getitem__(self, idx): - if idx >= 2: - raise IndexError - else: - x = self.x[idx * self.bsize : idx * self.bsize + self.bsize] - y = self.y[idx * self.bsize : idx * self.bsize + self.bsize] - - x.cuda = types.MethodType(_cuda, x) - y.cuda = types.MethodType(_cuda, y) - return x, y + head = { + "weight": torch.tensor(np.load("smooth_vit/smooth_vit_weights/head_weight.npy")).to(device), + "bias": torch.tensor(np.load("smooth_vit/smooth_vit_weights/head_bias.npy")).to(device), + } + art_model.model.head.load_state_dict(head) if torch.cuda.is_available(): num_to_fetch = 100 else: - num_to_fetch = 4 + num_to_fetch = 10 cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) - if torch.cuda.is_available(): - dataset = DataSet(cifar_data, cifar_labels) - validation_loader = DataLoader(dataset, batch_size=num_to_fetch) - else: - validation_loader = MyDataloader(cifar_data, cifar_labels) - - args = ArgClass() - - model = WrappedModel(my_model=art_model.model) - certify(args=args, model=model, validation_loader=validation_loader, store=None) - summary = torch.load(experiment_file_directory + "/m4_s4_summary.pth") - acc, cert_acc = art_model.eval_and_certify( x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 ) - assert torch.allclose(torch.tensor(cert_acc), torch.tensor(summary["cert_acc"])) - assert torch.tensor(acc) == torch.tensor(summary["smooth_acc"]) - upsample = torch.nn.Upsample(scale_factor=224 / 32) cifar_data = upsample(cifar_data) acc_non_ablation = art_model.model(cifar_data) acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) - assert np.allclose(acc_non_ablation.astype(float), summary["acc"]) - sys.path.remove("smoothed-vit/src/utils/") + if torch.cuda.is_available(): + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.29) + assert np.allclose(acc.cpu().numpy(), 0.57) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.16) + assert np.allclose(acc.cpu().numpy(), 0.24) + assert np.allclose(acc_non_ablation, 0.52) + else: + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.30) + assert np.allclose(acc.cpu().numpy(), 0.70) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.20) + assert np.allclose(acc.cpu().numpy(), 0.20) + assert np.allclose(acc_non_ablation, 0.60) @pytest.mark.only_with_platform("pytorch") From 8349f96a19babd4228d2f8ad816932006b439351 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 30 Aug 2023 15:11:03 +0000 Subject: [PATCH 49/55] fixing lingvo workflow. 
Adding pytorch check prior to import Signed-off-by: GiulioZizzo --- .github/workflows/ci-lingvo.yml | 2 +- .../derandomized_smoothing/__init__.py | 1 - .../derandomized_smoothing/ablators/__init__.py | 13 +++++++++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index ba433c6a7f..631f3f539f 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;^timm/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/art/estimators/certification/derandomized_smoothing/__init__.py b/art/estimators/certification/derandomized_smoothing/__init__.py index 64397cbb5d..69753f4f39 100644 --- a/art/estimators/certification/derandomized_smoothing/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/__init__.py @@ -1,6 +1,5 @@ """ DeRandomized smoothing estimators. """ -# from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py index d727d2efb7..80842ec043 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -1,8 +1,13 @@ """ This module contains the ablators for the certified smoothing approaches. """ -from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( - ColumnAblatorPyTorch, - BlockAblatorPyTorch, -) +import importlib + from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ColumnAblator, BlockAblator + +if importlib.util.find_spec("torch") is not None: + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, + ) + From 062be5e2c5bd08cbdd38c7dc1533bfaa20698250 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Wed, 30 Aug 2023 23:46:25 +0100 Subject: [PATCH 50/55] Correcting filepath in test. Adding expected values for training test. 
Signed-off-by: GiulioZizzo --- .../ablators/__init__.py | 1 - .../cumulative_predictions.npy | Bin 0 -> 2128 bytes .../certification/test_smooth_vit.py | 51 +++++++++++++++++- 3 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 tests/estimators/certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py index 80842ec043..23715d4aba 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -10,4 +10,3 @@ ColumnAblatorPyTorch, BlockAblatorPyTorch, ) - diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy b/tests/estimators/certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy new file mode 100644 index 0000000000000000000000000000000000000000..71c585b2c558af5066806b155970718f22d6c66a GIT binary patch literal 2128 zcmbV`=|9zZ8-S6rMTHh0S+bmCImr@I;`hCeG*J{qq(}~m>>i zh&4$kEq)p>q3s8<9-(06!y)yb6j63=3;n&v9FsND@UftT43x%#%UfgM9j%8N$!u^B zdB{8bn zoGMW*))jm>lO6t@df?Ty6q187NMG1`6k1*eeyRo@693AISICgU-CR6ry^_eS(V|Vd z4dhzDC==QtBFi;0m}Z|U*vh!#;|4X%NHb*Y>Q!JQGJ~9eVwlRTfg)d)IlLeg%ktgm z^BqQbSa~;L7cM~Sb$mE4<%nh<0p^>@!daaaBzV&V%7QPJB7tK`yYSZV+mO}DWQiKV~}N5HP%)(~Ng7IG{%(3Zx)PIe` ztUv8&dT%crN+|{Bi#7+8VKBFZm};H?*$Ez0uD(gQ_FN3t@rQ78 zSrm?%fZEPJ*pQo#(l6A}*R&S%a}DUFcyrV#)yJy~c<@oA%~$>)peK4&@IaY}Jm0`D zdK*KaA~OiWzb+#podKd$ZiF$mbsf#&Qs^DH19$IxMTF~3$=NOjazAE+`~Vl883mHQ zyfh4tmq4A<)#KkyEFhn^OLRuc*`f0~MZB|c0K@kCfs|w_ky=p) z*xv>-vd%DDqz+1QmvQpvE9lSO!4+y^K!&5JDd@ZP? z2m9_Z`8WBrwb%)Rf@>L0#t)FT?*_ySoS{L3E~Mp*fZB-aNp*P^oom$r{}d4kmCU2T zi`1#0bS1b*tw4W=OL#3`0$hhQ;oY5DaQiM6EL1B%i@%c*ef#g&CWX1l1<)A07p@j6 z;%|%B5ED5UEcxR!EtU(X0n8MA^hOF)o9m#^^aHu&Qx3XS;TYE4-|<>40Yyz(q$M>9 z#X3u~C#-=#tqj196Af#bZg_emvm@hc621*8r!flsFjIM-WIJe( z;gkFEiTPC`R_= Date: Mon, 4 Sep 2023 16:27:50 +0000 Subject: [PATCH 51/55] review edits Signed-off-by: GiulioZizzo --- .../ablators/pytorch.py | 5 + .../derandomized_smoothing/derandomized.py | 4 +- .../derandomized_smoothing/pytorch.py | 139 ++++- .../vision_transformers/pytorch.py | 166 ------ .../vision_transformers/vit.py | 4 +- .../certification/test_smooth_vit.py | 480 +++++++++--------- .../certification/smooth_vit}/head_bias.npy | Bin .../certification/smooth_vit}/head_weight.npy | Bin 8 files changed, 382 insertions(+), 416 deletions(-) delete mode 100644 art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py rename {tests/estimators/certification/smooth_vit/smooth_vit_weights => utils/resources/models/certification/smooth_vit}/head_bias.npy (100%) rename {tests/estimators/certification/smooth_vit/smooth_vit_weights => utils/resources/models/certification/smooth_vit}/head_weight.npy (100%) diff --git a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py index 356849ddb6..1f1ad1aeec 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -83,8 +83,11 @@ def __init__( :param channels_first: If the input is in channels first format. Currently required to be True. :param mode: If we are running the algorithm using a CNN or VIT. :param to_reshape: If the input requires reshaping. 
+ :param ablation_mode: The type of ablation to perform. :param original_shape: Original shape of the input. :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + :param algorithm: Either 'salman2021' or 'levine2020'. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. """ super().__init__() @@ -256,6 +259,8 @@ def __init__( :param to_reshape: If the input requires reshaping. :param original_shape: Original shape of the input. :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + :param algorithm: Either 'salman2021' or 'levine2020'. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. """ super().__init__() diff --git a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py index 5b055cf20d..e41435da39 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals -from abc import ABC +from abc import ABC, abstractmethod import numpy as np @@ -43,6 +43,7 @@ def __init__( """ super().__init__(*args, **kwargs) # type: ignore + @abstractmethod def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. @@ -54,6 +55,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo """ raise NotImplementedError + @abstractmethod def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Performs cumulative predictions over every ablation location diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 967234bc34..855540bef7 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -43,7 +43,6 @@ from art.estimators.classification.pytorch import PyTorchClassifier from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin -from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchSmoothedViT from art.utils import check_and_transform_label_format if TYPE_CHECKING: @@ -58,7 +57,7 @@ logger = logging.getLogger(__name__) -class PyTorchDeRandomizedSmoothing(PyTorchSmoothedViT, DeRandomizedSmoothingMixin, PyTorchClassifier): +class PyTorchDeRandomizedSmoothing(DeRandomizedSmoothingMixin, PyTorchClassifier): """ Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. @@ -115,8 +114,7 @@ def __init__( :param load_pretrained: ViT Specific. If to load a pretrained model matching the ViT name. Will only affect the ViT if a string name is passed to model rather than a ViT directly. :param optimizer: The optimizer used to train the classifier. - :param ablation_type: Specific to Levine et al. The type of ablation to perform, - must be either "column" or "block" + :param ablation_type: The type of ablation to perform. Either "column", "row", or "block" :param threshold: Specific to Levine et al. The minimum threshold to count a prediction. :param logits: Specific to Levine et al. 
If the model returns logits or normalized probabilities :param channels_first: Set channels first or last. @@ -134,9 +132,6 @@ def __init__( import torch - logging.basicConfig() - logger.setLevel(logging.INFO) - if not channels_first: raise ValueError("Channels must be set to first") logger.info("Running algorithm: %s", algorithm) @@ -149,7 +144,9 @@ def __init__( if isinstance(model, (VisionTransformer, str)): import timm - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ( + PyTorchVisionTransformer, + ) if replace_last_layer is None: raise ValueError("If using ViTs please specify if the last layer should be replaced") @@ -158,7 +155,7 @@ def __init__( tmp_func = timm.models.vision_transformer._create_vision_transformer # overrride with ART's ViT creation function - timm.models.vision_transformer._create_vision_transformer = self.art_create_vision_transformer + timm.models.vision_transformer._create_vision_transformer = self.create_vision_transformer if isinstance(model, str): model = timm.create_model( model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type @@ -204,7 +201,7 @@ def __init__( converted_optimizer.load_state_dict(opt_state_dict) self.to_reshape = False - if not isinstance(model, PyTorchViT): + if not isinstance(model, PyTorchVisionTransformer): raise ValueError("Vision transformer is not of PyTorchViT. Error occurred in PyTorchViT creation.") if model.default_cfg["input_size"][0] != input_shape[0]: @@ -307,7 +304,127 @@ def __init__( mode=self.mode, ) else: - raise ValueError(f"ablation_type of {ablation_type} not recognized. Must be either column or block") + raise ValueError(f"ablation_type of {ablation_type} not recognized. Must be either column, row, or block") + + @classmethod + def get_models(cls, generate_from_null: bool = False) -> List[str]: + """ + Return the supported model names to the user. + + :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. 
+ :return: A list of compatible models + """ + import timm + import torch + + supported_models = [ + "vit_base_patch8_224", + "vit_base_patch16_18x2_224", + "vit_base_patch16_224", + "vit_base_patch16_224_miil", + "vit_base_patch16_384", + "vit_base_patch16_clip_224", + "vit_base_patch16_clip_384", + "vit_base_patch16_gap_224", + "vit_base_patch16_plus_240", + "vit_base_patch16_rpn_224", + "vit_base_patch16_xp_224", + "vit_base_patch32_224", + "vit_base_patch32_384", + "vit_base_patch32_clip_224", + "vit_base_patch32_clip_384", + "vit_base_patch32_clip_448", + "vit_base_patch32_plus_256", + "vit_giant_patch14_224", + "vit_giant_patch14_clip_224", + "vit_gigantic_patch14_224", + "vit_gigantic_patch14_clip_224", + "vit_huge_patch14_224", + "vit_huge_patch14_clip_224", + "vit_huge_patch14_clip_336", + "vit_huge_patch14_xp_224", + "vit_large_patch14_224", + "vit_large_patch14_clip_224", + "vit_large_patch14_clip_336", + "vit_large_patch14_xp_224", + "vit_large_patch16_224", + "vit_large_patch16_384", + "vit_large_patch32_224", + "vit_large_patch32_384", + "vit_medium_patch16_gap_240", + "vit_medium_patch16_gap_256", + "vit_medium_patch16_gap_384", + "vit_small_patch16_18x2_224", + "vit_small_patch16_36x1_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch32_384", + "vit_tiny_patch16_224", + "vit_tiny_patch16_384", + ] + + if not generate_from_null: + return supported_models + + supported = [] + unsupported = [] + + models = timm.list_models("vit_*") + pbar = tqdm(models) + + # store in case not re-assigned in the model creation due to unsuccessful creation + tmp_func = timm.models.vision_transformer._create_vision_transformer # pylint: disable=W0212 + + for model in pbar: + pbar.set_description(f"Testing {model} creation") + try: + _ = cls( + model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) + supported.append(model) + except (TypeError, AttributeError): + unsupported.append(model) + timm.models.vision_transformer._create_vision_transformer = tmp_func # pylint: disable=W0212 + + if supported != supported_models: + logger.warning( + "Difference between the generated and fixed model list. Although not necessarily " + "an error, this may point to the timm library being updated." 
+ ) + + return supported + + @staticmethod + def create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "PyTorchVisionTransformer": + """ + Creates a vision transformer using PyTorchViT which controls the forward pass of the model + + :param variant: The name of the vision transformer to load + :param pretrained: If to load pre-trained weights + :return: A ViT with the required methods needed for ART + """ + + from timm.models._builder import build_model_with_cfg + from timm.models.vision_transformer import checkpoint_filter_fn + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer + + return build_model_with_cfg( + PyTorchVisionTransformer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) def fit( # pylint: disable=W0221 self, diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py deleted file mode 100644 index a8850ebed4..0000000000 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ /dev/null @@ -1,166 +0,0 @@ -# MIT License -# -# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -# Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-""" -This module implements Certified Patch Robustness via Smoothed Vision Transformers - -| Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - -| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import logging -from typing import List, TYPE_CHECKING - - -if TYPE_CHECKING: - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT - -logger = logging.getLogger(__name__) - - -class PyTorchSmoothedViT: - """ - Implementation of Certified Patch Robustness via Smoothed Vision Transformers - - | Paper link Accepted version: - https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf - - | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf - """ - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - @classmethod - def get_models(cls, generate_from_null: bool = False) -> List[str]: - """ - Return the supported model names to the user. - - :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. - :return: A list of compatible models - """ - import timm - import torch - - logging.basicConfig() - logger.setLevel(logging.INFO) - - supported_models = [ - "vit_base_patch8_224", - "vit_base_patch16_18x2_224", - "vit_base_patch16_224", - "vit_base_patch16_224_miil", - "vit_base_patch16_384", - "vit_base_patch16_clip_224", - "vit_base_patch16_clip_384", - "vit_base_patch16_gap_224", - "vit_base_patch16_plus_240", - "vit_base_patch16_rpn_224", - "vit_base_patch16_xp_224", - "vit_base_patch32_224", - "vit_base_patch32_384", - "vit_base_patch32_clip_224", - "vit_base_patch32_clip_384", - "vit_base_patch32_clip_448", - "vit_base_patch32_plus_256", - "vit_giant_patch14_224", - "vit_giant_patch14_clip_224", - "vit_gigantic_patch14_224", - "vit_gigantic_patch14_clip_224", - "vit_huge_patch14_224", - "vit_huge_patch14_clip_224", - "vit_huge_patch14_clip_336", - "vit_huge_patch14_xp_224", - "vit_large_patch14_224", - "vit_large_patch14_clip_224", - "vit_large_patch14_clip_336", - "vit_large_patch14_xp_224", - "vit_large_patch16_224", - "vit_large_patch16_384", - "vit_large_patch32_224", - "vit_large_patch32_384", - "vit_medium_patch16_gap_240", - "vit_medium_patch16_gap_256", - "vit_medium_patch16_gap_384", - "vit_small_patch16_18x2_224", - "vit_small_patch16_36x1_224", - "vit_small_patch16_224", - "vit_small_patch16_384", - "vit_small_patch32_224", - "vit_small_patch32_384", - "vit_tiny_patch16_224", - "vit_tiny_patch16_384", - ] - - if not generate_from_null: - return supported_models - - supported = [] - unsupported = [] - - models = timm.list_models("vit_*") - for model in models: - logger.info("Testing %s creation", model) - try: - _ = cls( - model=model, - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=False, - replace_last_layer=True, - verbose=False, - ) - supported.append(model) - except (TypeError, AttributeError): - unsupported.append(model) - - if supported != supported_models: - logger.warning( - "Difference between the generated and fixed model list. 
Although not necessarily " - "an error, this may point to the timm library being updated." - ) - - return supported - - @staticmethod - def art_create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "PyTorchViT": - """ - Creates a vision transformer using PyTorchViT which controls the forward pass of the model - - :param variant: The name of the vision transformer to load - :param pretrained: If to load pre-trained weights - :return: A ViT with the required methods needed for ART - """ - - from timm.models._builder import build_model_with_cfg - from timm.models.vision_transformer import checkpoint_filter_fn - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT - - return build_model_with_cfg( - PyTorchViT, - variant, - pretrained, - pretrained_filter_fn=checkpoint_filter_fn, - **kwargs, - ) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index 9403785541..cfa788a573 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -115,9 +115,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: raise ValueError("Projection layer not yet created.") -class PyTorchViT(VisionTransformer): +class PyTorchVisionTransformer(VisionTransformer): """ - Class to control the forward pass of the ViT. + Model-specific class to define the forward pass of the Vision Transformer (ViT) in PyTorch. """ # Make as a class attribute to avoid being included in the diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_smooth_vit.py index 9f847ca034..414e9d6f46 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_smooth_vit.py @@ -230,7 +230,7 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) np.load( os.path.join( os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_weights/head_weight.npy", + "../../utils/resources/models/certification/smooth_vit/head_weight.npy", ) ) ).to(device), @@ -238,7 +238,7 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) np.load( os.path.join( os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_weights/head_bias.npy", + "../../utils/resources/models/certification/smooth_vit/head_bias.npy", ) ) ).to(device), @@ -311,57 +311,59 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 cifar_data = fix_get_cifar10_data[0][:50] torch.manual_seed(1234) - - art_model = PyTorchDeRandomizedSmoothing( - model="vit_base_patch16_224", - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=True, - replace_last_layer=True, - verbose=False, - ) - - if ablation == "column": - ablator = ColumnAblatorPyTorch( + try: + art_model = PyTorchDeRandomizedSmoothing( + model="vit_base_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, ablation_size=4, - channels_first=True, - to_reshape=True, - mode="ViT", - original_shape=(3, 32, 32), - output_shape=(3, 224, 224), - ) - ablated = ablator.forward(cifar_data, column_pos=10) - 
madry_preds = torch.load( - os.path.join( - os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_results/madry_preds_column.pt", - ) + load_pretrained=True, + replace_last_layer=True, + verbose=False, ) - art_preds = art_model.model(ablated) - assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) - elif ablation == "block": - ablator = BlockAblatorPyTorch( - ablation_size=4, - channels_first=True, - to_reshape=True, - original_shape=(3, 32, 32), - output_shape=(3, 224, 224), - mode="ViT", - ) - ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) - madry_preds = torch.load( - os.path.join( - os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_results/madry_preds_block.pt", + if ablation == "column": + ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode="ViT", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), ) - ) - art_preds = art_model.model(ablated) - assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) + ablated = ablator.forward(cifar_data, column_pos=10) + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "certification/smooth_vit/smooth_vit_results/madry_preds_column.pt", + ) + ) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) + + elif ablation == "block": + ablator = BlockAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + mode="ViT", + ) + ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "certification/smooth_vit/smooth_vit_results/madry_preds_block.pt", + ) + ) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) + except ARTTestException as e: + art_warning(e) @pytest.mark.only_with_platform("pytorch") @@ -376,233 +378,239 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + try: + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_type=ablation, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) - art_model = PyTorchDeRandomizedSmoothing( - model="vit_small_patch16_224", - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_type=ablation, - ablation_size=4, - load_pretrained=True, - replace_last_layer=True, - verbose=False, - ) - - head = { - "weight": torch.tensor( - np.load( - os.path.join( - os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_weights/head_weight.npy", + head = { + "weight": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_weight.npy", + ) ) - ) - ).to(device), - "bias": torch.tensor( - np.load( - os.path.join( - os.path.dirname(os.path.dirname(__file__)), - 
"certification/smooth_vit/smooth_vit_weights/head_bias.npy", + ).to(device), + "bias": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_bias.npy", + ) ) - ) - ).to(device), - } - art_model.model.head.load_state_dict(head) - - if torch.cuda.is_available(): - num_to_fetch = 100 - else: - num_to_fetch = 10 + ).to(device), + } + art_model.model.head.load_state_dict(head) - cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) - cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) + if torch.cuda.is_available(): + num_to_fetch = 100 + else: + num_to_fetch = 10 - acc, cert_acc = art_model.eval_and_certify( - x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 - ) + cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) + cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) - upsample = torch.nn.Upsample(scale_factor=224 / 32) - cifar_data = upsample(cifar_data) - acc_non_ablation = art_model.model(cifar_data) - acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) + acc, cert_acc = art_model.eval_and_certify( + x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 + ) - if torch.cuda.is_available(): - if ablation == "column": - assert np.allclose(cert_acc.cpu().numpy(), 0.29) - assert np.allclose(acc.cpu().numpy(), 0.57) - else: - assert np.allclose(cert_acc.cpu().numpy(), 0.16) - assert np.allclose(acc.cpu().numpy(), 0.24) - assert np.allclose(acc_non_ablation, 0.52) - else: - if ablation == "column": - assert np.allclose(cert_acc.cpu().numpy(), 0.30) - assert np.allclose(acc.cpu().numpy(), 0.70) + upsample = torch.nn.Upsample(scale_factor=224 / 32) + cifar_data = upsample(cifar_data) + acc_non_ablation = art_model.model(cifar_data) + acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) + + if torch.cuda.is_available(): + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.29) + assert np.allclose(acc.cpu().numpy(), 0.57) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.16) + assert np.allclose(acc.cpu().numpy(), 0.24) + assert np.allclose(acc_non_ablation, 0.52) else: - assert np.allclose(cert_acc.cpu().numpy(), 0.20) - assert np.allclose(acc.cpu().numpy(), 0.20) - assert np.allclose(acc_non_ablation, 0.60) + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.30) + assert np.allclose(acc.cpu().numpy(), 0.70) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.20) + assert np.allclose(acc.cpu().numpy(), 0.20) + assert np.allclose(acc_non_ablation, 0.60) + except ARTTestException as e: + art_warning(e) @pytest.mark.only_with_platform("pytorch") -def test_equivalence(fix_get_cifar10_data): +def test_equivalence(art_warning, fix_get_cifar10_data): import torch from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchViT + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - class MadrylabImplementations: - """ - Code adapted from the implementation in https://github.com/MadryLab/smoothed-vit - to check against our own functionality. 
- - Original License: + try: - MIT License + class MadrylabImplementations: + """ + Code adapted from the implementation in https://github.com/MadryLab/smoothed-vit + to check against our own functionality. - Copyright (c) 2021 Madry Lab + Original License: - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: + MIT License - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. + Copyright (c) 2021 Madry Lab - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: - """ + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. - def __init__(self): - pass + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. - @classmethod - def token_dropper(cls, x, mask): - """ - The implementation of dropping tokens has been done slightly differently in this tool. 
- Here we check that it is equivalent to the original implementation """ - class MaskProcessor(torch.nn.Module): - def __init__(self, patch_size=16): - super().__init__() - self.avg_pool = torch.nn.AvgPool2d(patch_size) - - def forward(self, ones_mask): - B = ones_mask.shape[0] - ones_mask = ones_mask[0].unsqueeze(0) # take the first mask - ones_mask = self.avg_pool(ones_mask)[0] - ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 - ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) - ones_mask = ones_mask.expand(B, -1) - return ones_mask - - mask_processor = MaskProcessor() - patch_mask = mask_processor(mask) - - # x = self.pos_drop(x) # B, N, C - if patch_mask is not None: - # patch_mask is B, K - B, N, C = x.shape - if len(patch_mask.shape) == 1: # not a separate one per batch - x = x[:, patch_mask] - else: - patch_mask = patch_mask.unsqueeze(-1).expand(-1, -1, C) - x = torch.gather(x, 1, patch_mask) - return x - - @classmethod - def embedder(cls, x, pos_embed, cls_token): + def __init__(self): + pass + + @classmethod + def token_dropper(cls, x, mask): + """ + The implementation of dropping tokens has been done slightly differently in this tool. + Here we check that it is equivalent to the original implementation + """ + + class MaskProcessor(torch.nn.Module): + def __init__(self, patch_size=16): + super().__init__() + self.avg_pool = torch.nn.AvgPool2d(patch_size) + + def forward(self, ones_mask): + B = ones_mask.shape[0] + ones_mask = ones_mask[0].unsqueeze(0) # take the first mask + ones_mask = self.avg_pool(ones_mask)[0] + ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 + ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) + ones_mask = ones_mask.expand(B, -1) + return ones_mask + + mask_processor = MaskProcessor() + patch_mask = mask_processor(mask) + + # x = self.pos_drop(x) # B, N, C + if patch_mask is not None: + # patch_mask is B, K + B, N, C = x.shape + if len(patch_mask.shape) == 1: # not a separate one per batch + x = x[:, patch_mask] + else: + patch_mask = patch_mask.unsqueeze(-1).expand(-1, -1, C) + x = torch.gather(x, 1, patch_mask) + return x + + @classmethod + def embedder(cls, x, pos_embed, cls_token): + """ + NB, original code used the pos embed from the divit rather than vit + (which we pull from our model) which we use here. + + From timm vit: + self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + + From timm dvit: + self.pos_embed = nn.Parameter(torch.zeros(1, + self.patch_embed.num_patches + self.num_prefix_tokens, + self.embed_dim)) + + From repo: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + """ + x = torch.cat((cls_token.expand(x.shape[0], -1, -1), x), dim=1) + return x + pos_embed + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: """ - NB, original code used the pos embed from the divit rather than vit - (which we pull from our model) which we use here. - - From timm vit: - self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + This is a copy of the function in ArtViT.forward_features + except we also perform an equivalence assertion compared to the implementation + in https://github.com/MadryLab/smoothed-vit (see MadrylabImplementations class above) - From timm dvit: - self.pos_embed = nn.Parameter(torch.zeros(1, - self.patch_embed.num_patches + self.num_prefix_tokens, - self.embed_dim)) + The forward pass of the ViT. 
- From repo: - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + :param x: Input data. + :return: The input processed by the ViT backbone """ - x = torch.cat((cls_token.expand(x.shape[0], -1, -1), x), dim=1) - return x + pos_embed + import copy - def forward_features(self, x: torch.Tensor) -> torch.Tensor: - """ - This is a copy of the function in ArtViT.forward_features - except we also perform an equivalence assertion compared to the implementation - in https://github.com/MadryLab/smoothed-vit (see MadrylabImplementations class above) + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True - The forward pass of the ViT. + if ablated_input: + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] - :param x: Input data. - :return: The input processed by the ViT backbone - """ - import copy + x = self.patch_embed(x) - ablated_input = False - if x.shape[1] == self.in_chans + 1: - ablated_input = True + madry_embed = MadrylabImplementations.embedder(copy.copy(x), self.pos_embed, self.cls_token) + x = self._pos_embed(x) + assert torch.equal(madry_embed, x) - if ablated_input: - x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] + # pass the x into the token dropping code + madry_dropped = MadrylabImplementations.token_dropper(copy.copy(x), ablation_mask) - x = self.patch_embed(x) + if self.to_drop_tokens and ablated_input: + ones = self.ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + x = self.drop_tokens(x, indexes) - madry_embed = MadrylabImplementations.embedder(copy.copy(x), self.pos_embed, self.cls_token) - x = self._pos_embed(x) - assert torch.equal(madry_embed, x) + assert torch.equal(madry_dropped, x) - # pass the x into the token dropping code - madry_dropped = MadrylabImplementations.token_dropper(copy.copy(x), ablation_mask) + x = self.norm_pre(x) + x = self.blocks(x) - if self.to_drop_tokens and ablated_input: - ones = self.ablation_mask_embedder(ablation_mask) - to_drop = torch.sum(ones, dim=2) - indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) - x = self.drop_tokens(x, indexes) + return self.norm(x) - assert torch.equal(madry_dropped, x) + # Replace the forward_features with the forward_features code with checks. + PyTorchVisionTransformer.forward_features = forward_features - x = self.norm_pre(x) - x = self.blocks(x) - - return self.norm(x) - - # Replace the forward_features with the forward_features code with checks. 
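# The equivalence asserted via MaskProcessor above boils down to one idea: average-pool the
# pixel-level ablation mask into one value per 16x16 patch, keep the token indices whose
# patches remain (even partially) visible, and always retain the class token at index 0.
# A minimal, self-contained sketch of that selection (toy tensors and illustrative names
# only, not part of this patch; assumes a 224x224 input with 16x16 patches, embed dim 384):

import torch

patch_size = 16
mask = torch.zeros(1, 1, 224, 224)                            # 1 = visible, 0 = ablated
mask[:, :, :, 0:4 * patch_size] = 1.0                         # keep a 64-pixel-wide column

per_patch = torch.nn.functional.avg_pool2d(mask, patch_size)  # -> (1, 1, 14, 14)
keep = torch.where(per_patch.flatten() > 0)[0] + 1            # shift by 1 for the cls token
keep = torch.cat([torch.zeros(1, dtype=torch.long), keep])    # prepend the cls token index

tokens = torch.randn(1, 197, 384)                             # (batch, 1 + 14 * 14 patches, embed_dim)
kept_tokens = tokens[:, keep]                                 # (1, 57, 384) for this mask

# Plain indexing with `keep` and the gather over an expanded index tensor select the same
# rows, which is what the `torch.equal(madry_dropped, x)` assertion above verifies.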
- PyTorchViT.forward_features = forward_features - - art_model = PyTorchDeRandomizedSmoothing( - model="vit_small_patch16_224", - loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, - optimizer_params={"lr": 0.01}, - input_shape=(3, 32, 32), - nb_classes=10, - ablation_size=4, - load_pretrained=False, - replace_last_layer=True, - verbose=False, - ) + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) - cifar_data = fix_get_cifar10_data[0][:50] - cifar_labels = fix_get_cifar10_data[1][:50] + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] - scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) - art_model.fit(cifar_data, cifar_labels, nb_epochs=1, update_batchnorm=True, scheduler=scheduler, batch_size=128) + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) + art_model.fit(cifar_data, cifar_labels, nb_epochs=1, update_batchnorm=True, scheduler=scheduler, batch_size=128) + except ARTTestException as e: + art_warning(e) diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_bias.npy b/utils/resources/models/certification/smooth_vit/head_bias.npy similarity index 100% rename from tests/estimators/certification/smooth_vit/smooth_vit_weights/head_bias.npy rename to utils/resources/models/certification/smooth_vit/head_bias.npy diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_weights/head_weight.npy b/utils/resources/models/certification/smooth_vit/head_weight.npy similarity index 100% rename from tests/estimators/certification/smooth_vit/smooth_vit_weights/head_weight.npy rename to utils/resources/models/certification/smooth_vit/head_weight.npy From da4ee1cebd80ec72b256344af5d41ed0183daad5 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Mon, 4 Sep 2023 16:56:47 +0000 Subject: [PATCH 52/55] mypy fix Signed-off-by: GiulioZizzo --- art/estimators/certification/derandomized_smoothing/pytorch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 855540bef7..552a3addda 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -50,6 +50,7 @@ import torch import torchvision from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor From 94327123899accee7f3491fbe0abdb081819a7ae Mon Sep 17 00:00:00 2001 From: GiulioZizzo <41791963+GiulioZizzo@users.noreply.github.com> Date: Tue, 12 Sep 2023 13:45:48 +0100 Subject: [PATCH 53/55] Final review updates Co-authored-by: Beat Buesser <49047826+beat-buesser@users.noreply.github.com> Signed-off-by: GiulioZizzo --- .../certification/derandomized_smoothing/derandomized.py | 3 ++- .../derandomized_smoothing/vision_transformers/vit.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git 
a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py index e41435da39..9e2ee6ca0d 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -16,9 +16,10 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements (De)Randomized Smoothing defences from papers: +This module implements (De)Randomized Smoothing certifications against adversarial patches. | Paper link: https://arxiv.org/abs/2110.07719 + | Paper link: https://arxiv.org/abs/2002.10733 """ diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py index cfa788a573..48f96eefab 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py @@ -126,7 +126,7 @@ class PyTorchVisionTransformer(VisionTransformer): def __init__(self, **kwargs): """ - Create a PyTorchViT instance + Create a PyTorchVisionTransformer instance :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class """ From 584218a351ac9c807f9a3df971424dd489ccbd69 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Tue, 12 Sep 2023 14:21:20 +0100 Subject: [PATCH 54/55] name changes to follow ART patterns Signed-off-by: GiulioZizzo --- .../certification/derandomized_smoothing/pytorch.py | 8 +++++--- .../vision_transformers/{vit.py => pytorch.py} | 0 ...st_smooth_vit.py => test_vision_transformers.py} | 8 ++++---- .../smooth_vit}/cumulative_predictions.npy | Bin .../certification/smooth_vit}/madry_preds_block.pt | Bin .../certification/smooth_vit}/madry_preds_column.pt | Bin 6 files changed, 9 insertions(+), 7 deletions(-) rename art/estimators/certification/derandomized_smoothing/vision_transformers/{vit.py => pytorch.py} (100%) rename tests/estimators/certification/{test_smooth_vit.py => test_vision_transformers.py} (98%) rename {tests/estimators/certification/smooth_vit/smooth_vit_results => utils/resources/models/certification/smooth_vit}/cumulative_predictions.npy (100%) rename {tests/estimators/certification/smooth_vit/smooth_vit_results => utils/resources/models/certification/smooth_vit}/madry_preds_block.pt (100%) rename {tests/estimators/certification/smooth_vit/smooth_vit_results => utils/resources/models/certification/smooth_vit}/madry_preds_column.pt (100%) diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 552a3addda..cd3e53243b 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -50,7 +50,7 @@ import torch import torchvision from timm.models.vision_transformer import VisionTransformer - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchVisionTransformer from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor @@ -145,7 +145,7 @@ def __init__( if isinstance(model, 
(VisionTransformer, str)): import timm - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import ( + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import ( PyTorchVisionTransformer, ) @@ -417,7 +417,9 @@ def create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) from timm.models._builder import build_model_with_cfg from timm.models.vision_transformer import checkpoint_filter_fn - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import ( + PyTorchVisionTransformer, + ) return build_model_with_cfg( PyTorchVisionTransformer, diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py similarity index 100% rename from art/estimators/certification/derandomized_smoothing/vision_transformers/vit.py rename to art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py diff --git a/tests/estimators/certification/test_smooth_vit.py b/tests/estimators/certification/test_vision_transformers.py similarity index 98% rename from tests/estimators/certification/test_smooth_vit.py rename to tests/estimators/certification/test_vision_transformers.py index 414e9d6f46..9a42b8eb97 100644 --- a/tests/estimators/certification/test_smooth_vit.py +++ b/tests/estimators/certification/test_vision_transformers.py @@ -251,7 +251,7 @@ def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data) gt_preds = np.load( os.path.join( os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy", + "../../utils/resources/models/certification/smooth_vit/cumulative_predictions.npy", ) ) @@ -338,7 +338,7 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 madry_preds = torch.load( os.path.join( os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_results/madry_preds_column.pt", + "../../utils/resources/models/certification/smooth_vit/madry_preds_column.pt", ) ) art_preds = art_model.model(ablated) @@ -357,7 +357,7 @@ def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10 madry_preds = torch.load( os.path.join( os.path.dirname(os.path.dirname(__file__)), - "certification/smooth_vit/smooth_vit_results/madry_preds_block.pt", + "../../utils/resources/models/certification/smooth_vit/madry_preds_block.pt", ) ) art_preds = art_model.model(ablated) @@ -454,7 +454,7 @@ def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifa def test_equivalence(art_warning, fix_get_cifar10_data): import torch from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing - from art.estimators.certification.derandomized_smoothing.vision_transformers.vit import PyTorchVisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchVisionTransformer device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy b/utils/resources/models/certification/smooth_vit/cumulative_predictions.npy similarity index 100% rename from tests/estimators/certification/smooth_vit/smooth_vit_results/cumulative_predictions.npy 
rename to utils/resources/models/certification/smooth_vit/cumulative_predictions.npy diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_block.pt b/utils/resources/models/certification/smooth_vit/madry_preds_block.pt similarity index 100% rename from tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_block.pt rename to utils/resources/models/certification/smooth_vit/madry_preds_block.pt diff --git a/tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_column.pt b/utils/resources/models/certification/smooth_vit/madry_preds_column.pt similarity index 100% rename from tests/estimators/certification/smooth_vit/smooth_vit_results/madry_preds_column.pt rename to utils/resources/models/certification/smooth_vit/madry_preds_column.pt From fd69156955b7e16ad326c9ec907a9f12578dc165 Mon Sep 17 00:00:00 2001 From: GiulioZizzo Date: Fri, 15 Sep 2023 10:20:27 +0100 Subject: [PATCH 55/55] fix merge with dev Signed-off-by: GiulioZizzo --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 4058a94ebd..fdf9af2298 100644 --- a/setup.py +++ b/setup.py @@ -114,6 +114,7 @@ def get_version(rel_path): "numba", "timm", "multiprocess", + ] }, classifiers=[ "Development Status :: 3 - Alpha",
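For readers following the tests above, the certificate that column ablation ultimately relies on
(per the derandomized-smoothing paper linked in the patched docstring,
https://arxiv.org/abs/2002.10733) is simple counting: run the classifier once per ablation
position, tally the votes per class, and certify when no patch-sized region could flip the
outcome. The helper below is a minimal illustrative sketch of that condition, not part of the
ART API; the function name and the tie-breaking simplification are assumptions made for this
example only.

    import numpy as np

    def certify_column_ablation(per_ablation_votes: np.ndarray, ablation_size: int, patch_width: int):
        """
        per_ablation_votes: array of shape (num_ablation_positions, num_classes) holding
        0/1 votes, one row per column-ablation position.
        Returns (predicted_class, is_certified).
        """
        counts = per_ablation_votes.sum(axis=0)
        top, runner_up = np.argsort(counts)[::-1][:2]
        # A patch spanning `patch_width` columns can overlap at most this many ablation positions.
        delta = patch_width + ablation_size - 1
        # Certified when the margin survives even if every overlapped position flips its vote
        # (strict inequality; tie-breaking details are ignored in this sketch).
        certified = counts[top] - counts[runner_up] > 2 * delta
        return int(top), bool(certified)

With the settings used in the tests (32x32 CIFAR-10 inputs, ablation_size=4) there are 32 column
positions, a 4-column-wide patch can overlap at most 4 + 4 - 1 = 7 of them, and a prediction is
therefore certified once the top class leads the runner-up by more than 14 votes.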