diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index ab7ab24822..631f3f539f 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/art/estimators/certification/__init__.py b/art/estimators/certification/__init__.py index 92e79a0233..83a69eb514 100644 --- a/art/estimators/certification/__init__.py +++ b/art/estimators/certification/__init__.py @@ -6,7 +6,6 @@ from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing from art.estimators.certification.randomized_smoothing.tensorflow import TensorFlowV2RandomizedSmoothing from art.estimators.certification.randomized_smoothing.pytorch import PyTorchRandomizedSmoothing -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing from art.estimators.certification.object_seeker.object_seeker import ObjectSeekerMixin diff --git a/art/estimators/certification/derandomized_smoothing/__init__.py b/art/estimators/certification/derandomized_smoothing/__init__.py index 1eea6eb3da..69753f4f39 100644 --- a/art/estimators/certification/derandomized_smoothing/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/__init__.py @@ -1,6 +1,5 @@ """ DeRandomized smoothing estimators. """ -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py new file mode 100644 index 0000000000..23715d4aba --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -0,0 +1,12 @@ +""" +This module contains the ablators for the certified smoothing approaches. 
+""" +import importlib + +from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ColumnAblator, BlockAblator + +if importlib.util.find_spec("torch") is not None: + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, + ) diff --git a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py new file mode 100644 index 0000000000..3970b5b862 --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py @@ -0,0 +1,90 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements the abstract base class for the ablators. +""" +from __future__ import absolute_import, division, print_function, unicode_literals + +from abc import ABC, abstractmethod +from typing import Optional, Tuple, Union, TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + # pylint: disable=C0412 + import tensorflow as tf + import torch + + +class BaseAblator(ABC): + """ + Base class defining the methods used for the ablators. + """ + + @abstractmethod + def __call__( + self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + ) -> np.ndarray: + """ + Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location + specified by "column_pos" and "row_pos" in the case of block ablation. + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError + + @abstractmethod + def certify( + self, pred_counts: np.ndarray, size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Union[Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"], Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]]: + """ + Checks if based on the predictions supplied the classifications over the ablated datapoints result in a + certified prediction against a patch attack of size size_to_certify. + + :param pred_counts: The cumulative predictions of the classifier over the ablation locations. + :param size_to_certify: The size of the patch to check against. 
+ :param label: ground truth labels + """ + raise NotImplementedError + + @abstractmethod + def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> Union[np.ndarray, "torch.Tensor"]: + """ + Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location + specified by "column_pos" and "row_pos" in the case of block ablation. + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError + + @abstractmethod + def forward( + self, x: np.ndarray, column_pos: Optional[int] = None, row_pos: Optional[int] = None + ) -> Union[np.ndarray, "torch.Tensor"]: + """ + Ablate batch of data at locations specified by column_pos and row_pos + + :param x: input image. + :param column_pos: column position to specify where to retain the image + :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". + """ + raise NotImplementedError diff --git a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py new file mode 100644 index 0000000000..1f1ad1aeec --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -0,0 +1,401 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf +""" + +from typing import Optional, Union, Tuple +import random + +import numpy as np +import torch + +from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator + + +class UpSamplerPyTorch(torch.nn.Module): + """ + Resizes datasets to the specified size. 
+ Usually for upscaling datasets like CIFAR to Imagenet format + """ + + def __init__(self, input_size: int, final_size: int) -> None: + """ + Creates an upsampler to make the supplied data match the pre-trained ViT format + + :param input_size: Size of the current input data + :param final_size: Desired final size + """ + super().__init__() + self.upsample = torch.nn.Upsample(scale_factor=final_size / input_size) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass though the upsampler. + + :param x: Input data + :return: The upsampled input data + """ + return self.upsample(x) + + +class ColumnAblatorPyTorch(torch.nn.Module, BaseAblator): + """ + Pure Pytorch implementation of stripe/column ablation. + """ + + def __init__( + self, + ablation_size: int, + channels_first: bool, + mode: str, + to_reshape: bool, + ablation_mode: str = "column", + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + algorithm: str = "salman2021", + device_type: str = "gpu", + ): + """ + Creates a column ablator + + :param ablation_size: The size of the column we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param mode: If we are running the algorithm using a CNN or VIT. + :param to_reshape: If the input requires reshaping. + :param ablation_mode: The type of ablation to perform. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + :param algorithm: Either 'salman2021' or 'levine2020'. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. + """ + super().__init__() + + self.ablation_size = ablation_size + self.channels_first = channels_first + self.to_reshape = to_reshape + self.add_ablation_mask = False + self.additional_channels = False + self.algorithm = algorithm + self.original_shape = original_shape + self.ablation_mode = ablation_mode + + if self.algorithm == "levine2020": + self.additional_channels = True + if self.algorithm == "salman2021" and mode == "ViT": + self.add_ablation_mask = True + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + if original_shape is not None and output_shape is not None: + self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) + + def ablate( + self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: Optional[int] = None + ) -> torch.Tensor: + """ + Ablates the input column wise + + :param x: Input data + :param column_pos: location to start the retained column. NB, if row_ablation_mode is true then this will + be used to act on the rows through transposing the image in ColumnAblatorPyTorch.forward + :param row_pos: Unused. + :return: The ablated input with 0s where the ablation occurred + """ + k = self.ablation_size + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 + return x + + def forward( + self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos=None + ) -> torch.Tensor: + """ + Forward pass though the ablator. 
We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :param row_pos: Unused. + :return: The albated input with an extra channel indicating the location of the ablation + """ + if row_pos is not None: + raise ValueError("Use column_pos for a ColumnAblator. The row_pos argument is unused") + + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.algorithm == "salman2021": + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + if self.add_ablation_mask: + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + + if self.additional_channels: + x = torch.cat([x, 1.0 - x], dim=1) + + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.additional_channels: + raise ValueError( + f"Ablator expected {self.original_shape[0]} input channels. Received shape of {x.shape[1]}" + ) + + if self.ablation_mode == "row": + x = torch.transpose(x, 3, 2) + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + ablated_x = self.ablate(x, column_pos=column_pos) + + if self.ablation_mode == "row": + ablated_x = torch.transpose(ablated_x, 3, 2) + + if self.to_reshape: + ablated_x = self.upsample(ablated_x) + return ablated_x + + def certify( + self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions + + :param pred_counts: The model predictions over the ablated data. + :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. + """ + + if isinstance(pred_counts, np.ndarray): + pred_counts = torch.from_numpy(pred_counts).to(self.device) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label).to(self.device) + + num_of_classes = pred_counts.shape[-1] + + # NB! argmax and kthvalue handle ties between predicted counts differently. + # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L98 + # uses argmax for the model predictions + # (later called y_smoothed https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # and kthvalue for the certified predictions. + # to be consistent with the original implementation we also follow this here. 
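[Illustration, not part of the patch] A width-m patch can overlap at most m + s - 1 of the width-s column ablations, so it can remove that many votes from the true top class and add the same number to a rival class; certification therefore requires the vote margin to exceed 2 * (size_to_certify + ablation_size - 1). A minimal toy check with assumed counts (4 classes, ablation_size s = 4, patch size m = 5):

import torch

pred_counts = torch.tensor([[20., 2., 1., 1.], [12., 9., 2., 1.]])  # cumulative per-class counts for 2 inputs
s, m = 4, 5
num_classes = pred_counts.shape[-1]

top, _ = pred_counts.kthvalue(num_classes, dim=1)         # largest count per input
second, _ = pred_counts.kthvalue(num_classes - 1, dim=1)  # second largest count per input

cert = (top - second) > 2 * (m + s - 1)                   # threshold 16 -> tensor([True, False])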
+ top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) + + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) + + if self.algorithm == "levine2020": + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) + ) & (top_predicted_class < second_predicted_class) + cert = torch.logical_or(cert, tie_break_certs) + + cert_and_correct = cert & (label == top_predicted_class) + + return cert, cert_and_correct, top_predicted_class_argmax + + +class BlockAblatorPyTorch(torch.nn.Module, BaseAblator): + """ + Pure Pytorch implementation of block ablation. + """ + + def __init__( + self, + ablation_size: int, + channels_first: bool, + mode: str, + to_reshape: bool, + original_shape: Optional[Tuple] = None, + output_shape: Optional[Tuple] = None, + algorithm: str = "salman2021", + device_type: str = "gpu", + ): + """ + Creates a column ablator + + :param ablation_size: The size of the block we will retain. + :param channels_first: If the input is in channels first format. Currently required to be True. + :param mode: If we are running the algorithm using a CNN or VIT. + :param to_reshape: If the input requires reshaping. + :param original_shape: Original shape of the input. + :param output_shape: Input shape expected by the ViT. Usually means upscaling the input to 224 x 224. + :param algorithm: Either 'salman2021' or 'levine2020'. + :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. + """ + super().__init__() + + self.ablation_size = ablation_size + self.channels_first = channels_first + self.to_reshape = to_reshape + self.add_ablation_mask = False + self.additional_channels = False + self.algorithm = algorithm + self.original_shape = original_shape + + if self.algorithm == "levine2020": + self.additional_channels = True + if self.algorithm == "salman2021" and mode == "ViT": + self.add_ablation_mask = True + + if device_type == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + if original_shape is not None and output_shape is not None: + self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) + + def ablate(self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: int) -> torch.Tensor: + """ + Ablates the input block wise + + :param x: Input data + :param column_pos: The start position of the albation + :param row_pos: The row start position of the albation + :return: The ablated input with 0s where the ablation occurred + """ + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + k = self.ablation_size + # Column ablations + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: + x[:, :, :, :column_pos] = 0.0 + x[:, :, :, column_pos + k :] = 0.0 + + # Row ablations + if row_pos + k > x.shape[-2]: + x[:, :, (row_pos + k) % x.shape[-2] : row_pos, :] = 0.0 + else: + x[:, :, :row_pos, :] = 0.0 + x[:, :, row_pos + k :, :] = 0.0 + return x + + def forward( + self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos: Optional[int] = None + ) -> torch.Tensor: + """ + Forward pass though the 
ablator. We insert a new channel to keep track of the ablation location. + + :param x: Input data + :param column_pos: The start position of the albation + :return: The albated input with an extra channel indicating the location of the ablation if running in + """ + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.algorithm == "salman2021": + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + + if column_pos is None: + column_pos = random.randint(0, x.shape[3]) + + if row_pos is None: + row_pos = random.randint(0, x.shape[2]) + + if isinstance(x, np.ndarray): + x = torch.from_numpy(x).to(self.device) + + if self.add_ablation_mask: + ones = torch.torch.ones_like(x[:, 0:1, :, :]).to(self.device) + x = torch.cat([x, ones], dim=1) + + if self.additional_channels: + x = torch.cat([x, 1.0 - x], dim=1) + + if self.original_shape is not None and x.shape[1] != self.original_shape[0] and self.additional_channels: + raise ValueError(f"Ablator expected {self.original_shape[0]} input channels. Recived shape of {x.shape[1]}") + + ablated_x = self.ablate(x, column_pos=column_pos, row_pos=row_pos) + + if self.to_reshape: + ablated_x = self.upsample(ablated_x) + return ablated_x + + def certify( + self, + pred_counts: Union[torch.Tensor, np.ndarray], + size_to_certify: int, + label: Union[torch.Tensor, np.ndarray], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Performs certification of the predictions + + :param pred_counts: The model predictions over the ablated data. + :param size_to_certify: The patch size we wish to check certification against + :param label: The ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. + """ + + if isinstance(pred_counts, np.ndarray): + pred_counts = torch.from_numpy(pred_counts).to(self.device) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label).to(self.device) + + # NB! argmax and kthvalue handle ties between predicted counts differently. + # The original implementation: https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L145 + # uses argmax for the model predictions + # (later called y_smoothed https://github.com/MadryLab/smoothed-vit/blob/main/src/utils/smoothing.py#L230) + # and kthvalue for the certified predictions. + # to be consistent with the original implementation we also follow this here. 
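[Illustration, not part of the patch] The block variant squares the threshold because the retained s x s window moves in two dimensions: an m x m patch intersects up to (m + s - 1)^2 block positions rather than m + s - 1 column positions. For example, with ablation_size s = 4 and size_to_certify m = 5, the required margin is 2 * (5 + 4 - 1)^2 = 128 votes, versus 2 * 8 = 16 in the column case.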
+ top_predicted_class_argmax = torch.argmax(pred_counts, dim=1) + + num_of_classes = pred_counts.shape[-1] + + top_class_counts, top_predicted_class = pred_counts.kthvalue(num_of_classes, dim=1) + second_class_counts, second_predicted_class = pred_counts.kthvalue(num_of_classes - 1, dim=1) + + cert = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) ** 2 + + cert_and_correct = cert & (label == top_predicted_class) + + if self.algorithm == "levine2020": + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ** 2 + ) & (top_predicted_class < second_predicted_class) + cert = torch.logical_or(cert, tie_break_certs) + return cert, cert_and_correct, top_predicted_class_argmax diff --git a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py similarity index 51% rename from art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py rename to art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py index 42a31ca418..e4b927358e 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized_smoothing.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py @@ -23,176 +23,16 @@ from __future__ import absolute_import, division, print_function, unicode_literals -from abc import ABC, abstractmethod -from typing import Optional, Union, TYPE_CHECKING +from typing import Optional, Union, Tuple, TYPE_CHECKING import random import numpy as np -if TYPE_CHECKING: - from art.utils import ABLATOR_TYPE - - -class DeRandomizedSmoothingMixin(ABC): - """ - Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced - in Levine et al. (2020). - - | Paper link: https://arxiv.org/abs/2002.10733 - """ - - def __init__( - self, - ablation_type: str, - ablation_size: int, - threshold: float, - logits: bool, - channels_first: bool, - *args, - **kwargs, - ) -> None: - """ - Create a derandomized smoothing wrapper. - - :param ablation_type: The type of ablations to perform. Currently must be either "column", "row", or "block" - :param ablation_size: Size of the retained image patch. - An int specifying the width of the column for column ablation - Or an int specifying the height/width of a square for block ablation - :param threshold: The minimum threshold to count a prediction. - :param logits: if the model returns logits or normalized probabilities - :param channels_first: If the channels are first or last. - """ - super().__init__(*args, **kwargs) # type: ignore - self.ablation_type = ablation_type - self.logits = logits - self.threshold = threshold - self._channels_first = channels_first - if TYPE_CHECKING: - self.ablator: ABLATOR_TYPE # pylint: disable=used-before-assignment - - if self.ablation_type in {"column", "row"}: - row_ablation_mode = self.ablation_type == "row" - self.ablator = ColumnAblator( - ablation_size=ablation_size, channels_first=self._channels_first, row_ablation_mode=row_ablation_mode - ) - elif self.ablation_type == "block": - self.ablator = BlockAblator(ablation_size=ablation_size, channels_first=self._channels_first) - else: - raise ValueError("Ablation type not supported. Must be either column or block") - - def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: - """ - Perform prediction for a batch of inputs. 
- - :param x: Input samples. - :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. - """ - raise NotImplementedError - - def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: - """ - Performs cumulative predictions over every ablation location - - :param x: Unablated image - :param batch_size: the batch size for the prediction - :param training_mode: if to run the classifier in training mode - :return: cumulative predictions after sweeping over all the ablation configurations. - """ - if self._channels_first: - columns_in_data = x.shape[-1] - rows_in_data = x.shape[-2] - else: - columns_in_data = x.shape[-2] - rows_in_data = x.shape[-3] - - if self.ablation_type in {"column", "row"}: - if self.ablation_type == "column": - ablate_over_range = columns_in_data - else: - # image will be transposed, so loop over the number of rows - ablate_over_range = rows_in_data - - for ablation_start in range(ablate_over_range): - ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) - if ablation_start == 0: - preds = self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - else: - preds += self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - elif self.ablation_type == "block": - for xcorner in range(rows_in_data): - for ycorner in range(columns_in_data): - ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) - if ycorner == 0 and xcorner == 0: - preds = self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - else: - preds += self._predict_classifier( - ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs - ) - return preds - - -class BaseAblator(ABC): - """ - Base class defining the methods used for the ablators. - """ - - @abstractmethod - def __call__( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None - ) -> np.ndarray: - """ - Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location - specified by "column_pos" and "row_pos" in the case of block ablation. - - :param x: input image. - :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError - - @abstractmethod - def certify(self, preds: np.ndarray, size_to_certify: int): - """ - Checks if based on the predictions supplied the classifications over the ablated datapoints result in a - certified prediction against a patch attack of size size_to_certify. - - :param preds: The cumulative predictions of the classifier over the ablation locations. - :param size_to_certify: The size of the patch to check against. - """ - raise NotImplementedError - - @abstractmethod - def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> np.ndarray: - """ - Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location - specified by "column_pos" and "row_pos" in the case of block ablation. +from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator - :param x: input image. 
- :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError - - @abstractmethod - def forward( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None - ) -> np.ndarray: - """ - Ablate batch of data at locations specified by column_pos and row_pos - - :param x: input image. - :param column_pos: column position to specify where to retain the image - :param row_pos: row position to specify where to retain the image. Not used for ablation type "column". - """ - raise NotImplementedError +if TYPE_CHECKING: + # pylint: disable=C0412 + import tensorflow as tf class ColumnAblator(BaseAblator): @@ -230,27 +70,50 @@ def __call__( """ return self.forward(x=x, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int) -> np.ndarray: + def certify( + self, pred_counts: "tf.Tensor", size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. :param preds: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. - :return: Array of bools indicating if a point is certified against the given patch dimensions. + :param label: Ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. """ - indices = np.argsort(-preds, axis=1, kind="stable") - values = np.take_along_axis(np.copy(preds), indices, axis=1) + import tensorflow as tf - num_affected_classifications = size_to_certify + self.ablation_size - 1 + result = tf.math.top_k(pred_counts, k=2) - margin = values[:, 0] - values[:, 1] + top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] + top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] - certs = margin > 2 * num_affected_classifications - tie_break_certs = (margin == 2 * num_affected_classifications) & (indices[:, 0] < indices[:, 1]) - return np.logical_or(certs, tie_break_certs) + certs = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) - def ablate(self, x: np.ndarray, column_pos: int, row_pos=None) -> np.ndarray: + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) + ) & (top_predicted_class < second_predicted_class) + cert = tf.math.logical_or(certs, tie_break_certs) + + # NB, newer versions of pylint do not require the disable. 
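[Illustration, not part of the patch] A standalone sketch of the tf.math.top_k margin test with toy counts and index labels (4 classes, ablation_size 4, patch size 5); one-hot labels are first reduced with tf.math.argmax as in the code below:

import tensorflow as tf

pred_counts = tf.constant([[20.0, 2.0, 1.0, 1.0], [12.0, 9.0, 2.0, 1.0]])  # cumulative per-class counts
label = tf.constant([0, 1], dtype=tf.int64)                                # ground truth (index labels)

result = tf.math.top_k(pred_counts, k=2)
margin = result.values[:, 0] - result.values[:, 1]

cert = margin > 2 * (5 + 4 - 1)                                            # [True, False]
cert_and_correct = cert & (label == tf.cast(result.indices[:, 0], dtype=label.dtype))  # [True, False]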
+ if label.ndim > 1: + cert_and_correct = cert & ( + tf.math.argmax(label, axis=1) + == tf.cast( # pylint: disable=E1120, E1123 + top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype + ) + ) + else: + cert_and_correct = cert & ( + label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 + ) + + return cert, cert_and_correct, top_predicted_class + + def ablate(self, x: np.ndarray, column_pos: int, row_pos: Optional[int] = None) -> np.ndarray: """ Ablates the image only retaining a column starting at "pos" of width "self.ablation_size" @@ -348,24 +211,47 @@ def __call__( """ return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) - def certify(self, preds: np.ndarray, size_to_certify: int) -> np.ndarray: + def certify( + self, pred_counts: Union["tf.Tensor", np.ndarray], size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] + ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. - :param preds: The cumulative predictions of the classifier over the ablation locations. + :param pred_counts: The cumulative predictions of the classifier over the ablation locations. :param size_to_certify: The size of the patch to check against. - :return: Array of bools indicating if a point is certified against the given patch dimensions. - """ - indices = np.argsort(-preds, axis=1, kind="stable") - values = np.take_along_axis(np.copy(preds), indices, axis=1) - margin = values[:, 0] - values[:, 1] - - num_affected_classifications = (size_to_certify + self.ablation_size - 1) ** 2 + :param label: Ground truth labels + :return: A tuple consisting of: the certified predictions, + the predictions which were certified and also correct, + and the most predicted class across the different ablations on the input. + """ + import tensorflow as tf + + result = tf.math.top_k(pred_counts, k=2) + + top_predicted_class, second_predicted_class = result.indices[:, 0], result.indices[:, 1] + top_class_counts, second_class_counts = result.values[:, 0], result.values[:, 1] + + certs = (top_class_counts - second_class_counts) > 2 * (size_to_certify + self.ablation_size - 1) ** 2 + tie_break_certs = ( + (top_class_counts - second_class_counts) == 2 * (size_to_certify + self.ablation_size - 1) ** 2 + ) & (top_predicted_class < second_predicted_class) + cert = tf.math.logical_or(certs, tie_break_certs) + + # NB, newer versions of pylint do not require the disable. + if label.ndim > 1: + cert_and_correct = cert & ( + tf.math.argmax(label, axis=1) + == tf.cast( # pylint: disable=E1120, E1123 + top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype + ) + ) + else: + cert_and_correct = cert & ( + label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 + ) - certs = margin > 2 * num_affected_classifications - tie_break_certs = (margin == 2 * num_affected_classifications) & (indices[:, 0] < indices[:, 1]) - return np.logical_or(certs, tie_break_certs) + return cert, cert_and_correct, top_predicted_class def forward( self, @@ -415,40 +301,17 @@ def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> np.ndarray: :return: Data ablated at all locations aside from the specified block. 
""" k = self.ablation_size - num_of_image_columns = x.shape[3] - num_of_image_rows = x.shape[2] - - if row_pos + k > x.shape[2] and column_pos + k > x.shape[3]: - start_of_ablation = column_pos + k - num_of_image_columns - x[:, :, :, start_of_ablation:column_pos] = 0.0 - - start_of_ablation = row_pos + k - num_of_image_rows - x[:, :, start_of_ablation:row_pos, :] = 0.0 - - # only the row wraps - elif row_pos + k > x.shape[2] and column_pos + k <= x.shape[3]: - x[:, :, :, :column_pos] = 0.0 - x[:, :, :, column_pos + k :] = 0.0 - - start_of_ablation = row_pos + k - num_of_image_rows - x[:, :, start_of_ablation:row_pos, :] = 0.0 - - # only column wraps - elif row_pos + k <= x.shape[2] and column_pos + k > x.shape[3]: - start_of_ablation = column_pos + k - num_of_image_columns - x[:, :, :, start_of_ablation:column_pos] = 0.0 - - x[:, :, :row_pos, :] = 0.0 - x[:, :, row_pos + k :, :] = 0.0 - - # neither wraps - elif row_pos + k <= x.shape[2] and column_pos + k <= x.shape[3]: + # Column ablations + if column_pos + k > x.shape[-1]: + x[:, :, :, (column_pos + k) % x.shape[-1] : column_pos] = 0.0 + else: x[:, :, :, :column_pos] = 0.0 x[:, :, :, column_pos + k :] = 0.0 + # Row ablations + if row_pos + k > x.shape[-2]: + x[:, :, (row_pos + k) % x.shape[-2] : row_pos, :] = 0.0 + else: x[:, :, :row_pos, :] = 0.0 x[:, :, row_pos + k :, :] = 0.0 - else: - raise ValueError(f"Ablation failed on row: {row_pos} and column: {column_pos} with size {k}") - return x diff --git a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py new file mode 100644 index 0000000000..9e2ee6ca0d --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -0,0 +1,69 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements (De)Randomized Smoothing certifications against adversarial patches. + +| Paper link: https://arxiv.org/abs/2110.07719 + +| Paper link: https://arxiv.org/abs/2002.10733 +""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +from abc import ABC, abstractmethod +import numpy as np + + +class DeRandomizedSmoothingMixin(ABC): + """ + Mixin class for smoothed estimators. + """ + + def __init__( + self, + *args, + **kwargs, + ) -> None: + """ + Create a derandomized smoothing wrapper. 
+ """ + super().__init__(*args, **kwargs) # type: ignore + + @abstractmethod + def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: + """ + Perform prediction for a batch of inputs. + + :param x: Input samples. + :param batch_size: Size of batches. + :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :return: Array of predictions of shape `(nb_inputs, nb_classes)`. + """ + raise NotImplementedError + + @abstractmethod + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + """ + Performs cumulative predictions over every ablation location + + :param x: Unablated image + :param batch_size: the batch size for the prediction + :param training_mode: if to run the classifier in training mode + :return: cumulative predictions after sweeping over all the ablation configurations. + """ + raise NotImplementedError diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 4a184b3666..cd3e53243b 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -16,13 +16,24 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements (De)Randomized Smoothing for Certifiable Defense against Patch Attacks +This module implements De-Randomized smoothing approaches PyTorch. + +(De)Randomized Smoothing for Certifiable Defense against Patch Attacks | Paper link: https://arxiv.org/abs/2002.10733 + +and + +Certified Patch Robustness via Smoothed Vision Transformers + +| Paper link Accepted version: + https://openaccess.thecvf.com/content/CVPR2022/papers/Salman_Certified_Patch_Robustness_via_Smoothed_Vision_Transformers_CVPR_2022_paper.pdf + +| Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ from __future__ import absolute_import, division, print_function, unicode_literals - +import importlib import logging from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING import random @@ -30,15 +41,16 @@ import numpy as np from tqdm import tqdm -from art.config import ART_NUMPY_DTYPE from art.estimators.classification.pytorch import PyTorchClassifier -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin +from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.utils import check_and_transform_label_format if TYPE_CHECKING: # pylint: disable=C0412 import torch - + import torchvision + from timm.models.vision_transformer import VisionTransformer + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchVisionTransformer from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor @@ -48,47 +60,64 @@ class PyTorchDeRandomizedSmoothing(DeRandomizedSmoothingMixin, PyTorchClassifier): """ - Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced - in Levine et al. (2020). + Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. 
- | Paper link: https://arxiv.org/abs/2002.10733 - """ + If a regular pytorch neural network is fed in then (De)Randomized Smoothing as introduced in Levine et al. (2020) + is used. - estimator_params = PyTorchClassifier.estimator_params + ["ablation_type", "ablation_size", "threshold", "logits"] + Otherwise, if a timm vision transfomer is fed in then Certified Patch Robustness via Smoothed Vision Transformers + as introduced in Salman et al. (2021) is used. + """ def __init__( self, - model: "torch.nn.Module", + model: Union[str, "VisionTransformer", "torch.nn.Module"], loss: "torch.nn.modules.loss._Loss", input_shape: Tuple[int, ...], nb_classes: int, - ablation_type: str, ablation_size: int, - threshold: float, - logits: bool, - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + algorithm: str = "salman2021", + ablation_type: str = "column", + replace_last_layer: Optional[bool] = None, + drop_tokens: bool = True, + load_pretrained: bool = True, + optimizer: Union[type, "torch.optim.Optimizer", None] = None, + optimizer_params: Optional[dict] = None, channels_first: bool = True, + threshold: Optional[float] = None, + logits: Optional[bool] = True, clip_values: Optional["CLIP_VALUES_TYPE"] = None, preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", + verbose: bool = True, + **kwargs, ): """ - Create a derandomized smoothing classifier. + Create a smoothed classifier. - :param model: PyTorch model. The output of the model can be logits, probabilities or anything else. Logits - output should be preferred where possible to ensure attack efficiency. + :param model: Either a CNN or a VIT. For a ViT supply a string specifying which ViT architecture to load from + the ViT library, or a vision transformer already created with the + Pytorch Image Models (timm) library. To run Levine et al. (2020) provide a regular pytorch model. :param loss: The loss function for which to compute gradients for training. The target label must be raw - categorical, i.e. not converted to one-hot encoding. + categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. :param nb_classes: The number of classes of the model. - :param ablation_type: The type of ablation to perform, must be either "column" or "block" - :param ablation_size: The size of the data portion to retain after ablation. Will be a column of size N for - "column" ablation type or a NxN square for ablation of type "block" - :param threshold: The minimum threshold to count a prediction. - :param logits: if the model returns logits or normalized probabilities + :param ablation_size: The size of the data portion to retain after ablation. + :param algorithm: Either 'salman2021' or 'levine2020'. For salman2021 we support ViTs and CNNs. For levine2020 + there is only CNN support. + :param replace_last_layer: ViT Specific. If to replace the last layer of the ViT with a fresh layer + matching the number of classes for the dataset to be examined. + Needed if going from the pre-trained imagenet models to fine-tune + on a dataset like CIFAR. + :param drop_tokens: ViT Specific. If to drop the fully ablated tokens in the ViT + :param load_pretrained: ViT Specific. If to load a pretrained model matching the ViT name. + Will only affect the ViT if a string name is passed to model rather than a ViT directly. 
:param optimizer: The optimizer used to train the classifier. + :param ablation_type: The type of ablation to perform. Either "column", "row", or "block" + :param threshold: Specific to Levine et al. The minimum threshold to count a prediction. + :param logits: Specific to Levine et al. If the model returns logits or normalized probabilities :param channels_first: Set channels first or last. :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and maximum values allowed for features. If floats are provided, these will be used as the range of all @@ -101,52 +130,304 @@ def __init__( be divided by the second one. :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. """ - super().__init__( - model=model, - loss=loss, - input_shape=input_shape, - nb_classes=nb_classes, - optimizer=optimizer, - channels_first=channels_first, - clip_values=clip_values, - preprocessing_defences=preprocessing_defences, - postprocessing_defences=postprocessing_defences, - preprocessing=preprocessing, - device_type=device_type, - ablation_type=ablation_type, - ablation_size=ablation_size, - threshold=threshold, - logits=logits, - ) - def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: import torch - x = x.astype(ART_NUMPY_DTYPE) - outputs = PyTorchClassifier.predict(self, x=x, batch_size=batch_size, training_mode=training_mode, **kwargs) + if not channels_first: + raise ValueError("Channels must be set to first") + logger.info("Running algorithm: %s", algorithm) + + # Default value for output shape + output_shape = input_shape + self.mode = None + if importlib.util.find_spec("timm") is not None and algorithm == "salman2021": + from timm.models.vision_transformer import VisionTransformer + + if isinstance(model, (VisionTransformer, str)): + import timm + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import ( + PyTorchVisionTransformer, + ) + + if replace_last_layer is None: + raise ValueError("If using ViTs please specify if the last layer should be replaced") + + # temporarily assign the original method to tmp_func + tmp_func = timm.models.vision_transformer._create_vision_transformer + + # overrride with ART's ViT creation function + timm.models.vision_transformer._create_vision_transformer = self.create_vision_transformer + if isinstance(model, str): + model = timm.create_model( + model, pretrained=load_pretrained, drop_tokens=drop_tokens, device_type=device_type + ) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + if isinstance(optimizer, type): + if optimizer_params is not None: + optimizer = optimizer(model.parameters(), **optimizer_params) + else: + raise ValueError("If providing an optimiser please also supply its parameters") + + elif isinstance(model, VisionTransformer): + pretrained_cfg = model.pretrained_cfg + supplied_state_dict = model.state_dict() + supported_models = self.get_models() + if pretrained_cfg["architecture"] not in supported_models: + raise ValueError( + "Architecture not supported. Use PyTorchDeRandomizedSmoothing.get_models() " + "to get the supported model architectures." 
+ ) + model = timm.create_model( + pretrained_cfg["architecture"], drop_tokens=drop_tokens, device_type=device_type + ) + model.load_state_dict(supplied_state_dict) + if replace_last_layer: + model.head = torch.nn.Linear(model.head.in_features, nb_classes) + + if optimizer is not None: + if not isinstance(optimizer, torch.optim.Optimizer): + raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") + + converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD] + opt_state_dict = optimizer.state_dict() + if isinstance(optimizer, torch.optim.Adam): + logging.info("Converting Adam Optimiser") + converted_optimizer = torch.optim.Adam(model.parameters(), lr=1e-4) + elif isinstance(optimizer, torch.optim.SGD): + logging.info("Converting SGD Optimiser") + converted_optimizer = torch.optim.SGD(model.parameters(), lr=1e-4) + else: + raise ValueError("Optimiser not supported for conversion") + converted_optimizer.load_state_dict(opt_state_dict) + + self.to_reshape = False + if not isinstance(model, PyTorchVisionTransformer): + raise ValueError("Vision transformer is not of PyTorchViT. Error occurred in PyTorchViT creation.") + + if model.default_cfg["input_size"][0] != input_shape[0]: + raise ValueError( + f'ViT requires {model.default_cfg["input_size"][0]} channel input,' + f" but {input_shape[0]} channels were provided." + ) + + if model.default_cfg["input_size"] != input_shape: + if verbose: + logger.warning( + " ViT expects input shape of: (%i, %i, %i) but (%i, %i, %i) specified as the input shape." + " The input will be rescaled to (%i, %i, %i)", + *model.default_cfg["input_size"], + *input_shape, + *model.default_cfg["input_size"], + ) - if not self.logits: - return np.asarray((outputs >= self.threshold)) - return np.asarray( - (torch.nn.functional.softmax(torch.from_numpy(outputs), dim=1) >= self.threshold).type(torch.int) + self.to_reshape = True + output_shape = model.default_cfg["input_size"] + + # set the method back to avoid unexpected side effects later on should timm need to be reused. + timm.models.vision_transformer._create_vision_transformer = tmp_func + self.mode = "ViT" + else: + if isinstance(model, torch.nn.Module): + self.mode = "CNN" + output_shape = input_shape + self.to_reshape = False + + elif algorithm == "levine2020": + if ablation_type is None or threshold is None or logits is None: + raise ValueError( + "If using CNN please specify if the model returns logits, " + " the prediction threshold, and ablation type" + ) + self.mode = "CNN" + # input channels are internally doubled. 
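[Illustration only; the model and hyperparameters are assumptions, not part of the patch] Configuring the levine2020 path with a small CNN. The ablator concatenates the input with its complement (1 - x), so the network's first layer sees twice the original channel count, while input_shape is supplied in its original, un-doubled form:

import torch
from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing

cnn = torch.nn.Sequential(
    torch.nn.Conv2d(6, 32, kernel_size=3, padding=1),  # 2 * 3 input channels for RGB data
    torch.nn.ReLU(),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(32, 10),
)

estimator = PyTorchDeRandomizedSmoothing(
    model=cnn,
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.SGD(cnn.parameters(), lr=0.01),
    input_shape=(3, 32, 32),      # original shape; the channel doubling happens internally
    nb_classes=10,
    ablation_size=4,
    algorithm="levine2020",
    ablation_type="column",
    threshold=0.3,
    logits=True,
)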
+ input_shape = (input_shape[0] * 2, input_shape[1], input_shape[2]) + output_shape = input_shape + self.to_reshape = False + + if optimizer is None or isinstance(optimizer, torch.optim.Optimizer): + super().__init__( + model=model, + loss=loss, + input_shape=input_shape, + nb_classes=nb_classes, + optimizer=optimizer, + channels_first=channels_first, + clip_values=clip_values, + preprocessing_defences=preprocessing_defences, + postprocessing_defences=postprocessing_defences, + preprocessing=preprocessing, + device_type=device_type, + ) + else: + raise ValueError("Error occurred in optimizer creation") + + self.threshold = threshold + self.logits = logits + self.ablation_size = (ablation_size,) + self.algorithm = algorithm + self.ablation_type = ablation_type + if verbose: + logger.info(self.model) + + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, ) - def predict( - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: # type: ignore + if TYPE_CHECKING: + self.ablator: Union[ColumnAblatorPyTorch, BlockAblatorPyTorch] + + if self.mode is None: + raise ValueError("Model type not recognized.") + + if ablation_type in {"column", "row"}: + self.ablator = ColumnAblatorPyTorch( + ablation_size=ablation_size, + channels_first=True, + ablation_mode=ablation_type, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=output_shape, + device_type=device_type, + algorithm=algorithm, + mode=self.mode, + ) + elif ablation_type == "block": + self.ablator = BlockAblatorPyTorch( + ablation_size=ablation_size, + channels_first=True, + to_reshape=self.to_reshape, + original_shape=input_shape, + output_shape=output_shape, + device_type=device_type, + algorithm=algorithm, + mode=self.mode, + ) + else: + raise ValueError(f"ablation_type of {ablation_type} not recognized. Must be either column, row, or block") + + @classmethod + def get_models(cls, generate_from_null: bool = False) -> List[str]: """ - Perform prediction of the given classifier for a batch of inputs, taking an expectation over transformations. + Return the supported model names to the user. - :param x: Input samples. - :param batch_size: Batch size. - :param training_mode: if to run the classifier in training mode - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. + :param generate_from_null: If to re-check the creation of all the ViTs in timm from scratch. 
+ :return: A list of compatible models """ - return DeRandomizedSmoothingMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) + import timm + import torch - def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epochs: int, **kwargs) -> None: - x = x.astype(ART_NUMPY_DTYPE) - return PyTorchClassifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) + supported_models = [ + "vit_base_patch8_224", + "vit_base_patch16_18x2_224", + "vit_base_patch16_224", + "vit_base_patch16_224_miil", + "vit_base_patch16_384", + "vit_base_patch16_clip_224", + "vit_base_patch16_clip_384", + "vit_base_patch16_gap_224", + "vit_base_patch16_plus_240", + "vit_base_patch16_rpn_224", + "vit_base_patch16_xp_224", + "vit_base_patch32_224", + "vit_base_patch32_384", + "vit_base_patch32_clip_224", + "vit_base_patch32_clip_384", + "vit_base_patch32_clip_448", + "vit_base_patch32_plus_256", + "vit_giant_patch14_224", + "vit_giant_patch14_clip_224", + "vit_gigantic_patch14_224", + "vit_gigantic_patch14_clip_224", + "vit_huge_patch14_224", + "vit_huge_patch14_clip_224", + "vit_huge_patch14_clip_336", + "vit_huge_patch14_xp_224", + "vit_large_patch14_224", + "vit_large_patch14_clip_224", + "vit_large_patch14_clip_336", + "vit_large_patch14_xp_224", + "vit_large_patch16_224", + "vit_large_patch16_384", + "vit_large_patch32_224", + "vit_large_patch32_384", + "vit_medium_patch16_gap_240", + "vit_medium_patch16_gap_256", + "vit_medium_patch16_gap_384", + "vit_small_patch16_18x2_224", + "vit_small_patch16_36x1_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch32_384", + "vit_tiny_patch16_224", + "vit_tiny_patch16_384", + ] + + if not generate_from_null: + return supported_models + + supported = [] + unsupported = [] + + models = timm.list_models("vit_*") + pbar = tqdm(models) + + # store in case not re-assigned in the model creation due to unsuccessful creation + tmp_func = timm.models.vision_transformer._create_vision_transformer # pylint: disable=W0212 + + for model in pbar: + pbar.set_description(f"Testing {model} creation") + try: + _ = cls( + model=model, + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) + supported.append(model) + except (TypeError, AttributeError): + unsupported.append(model) + timm.models.vision_transformer._create_vision_transformer = tmp_func # pylint: disable=W0212 + + if supported != supported_models: + logger.warning( + "Difference between the generated and fixed model list. Although not necessarily " + "an error, this may point to the timm library being updated." 
+ ) + + return supported + + @staticmethod + def create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> "PyTorchVisionTransformer": + """ + Creates a vision transformer using PyTorchViT which controls the forward pass of the model + + :param variant: The name of the vision transformer to load + :param pretrained: If to load pre-trained weights + :return: A ViT with the required methods needed for ART + """ + + from timm.models._builder import build_model_with_cfg + from timm.models.vision_transformer import checkpoint_filter_fn + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import ( + PyTorchVisionTransformer, + ) + + return build_model_with_cfg( + PyTorchVisionTransformer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) def fit( # pylint: disable=W0221 self, @@ -157,10 +438,15 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional[Any] = None, + update_batchnorm: bool = True, + batchnorm_update_epochs: int = 1, + transform: Optional["torchvision.transforms.transforms.Compose"] = None, + verbose: bool = True, **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. + :param x: Training data. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of shape (nb_samples,). @@ -171,6 +457,13 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. + :param update_batchnorm: ViT specific argument. + If to run the training data through the model to update any batch norm statistics prior + to training. Useful on small datasets when using pre-trained ViTs. + :param batchnorm_update_epochs: ViT specific argument. How many times to forward pass over the training data + to pre-adjust the batchnorm statistics. + :param transform: ViT specific argument. Torchvision compose of relevant augmentation transformations to apply. + :param verbose: if to display training progress bars :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. 
""" @@ -187,14 +480,14 @@ def fit( # pylint: disable=W0221 # Apply preprocessing x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + if update_batchnorm and self.mode == "ViT": # VIT specific + self.update_batchnorm(x_preprocessed, batch_size, nb_epochs=batchnorm_update_epochs) + # Check label shape y_preprocessed = self.reduce_labels(y_preprocessed) num_batch = len(x_preprocessed) / float(batch_size) - if drop_last: - num_batch = int(np.floor(num_batch)) - else: - num_batch = int(np.ceil(num_batch)) + num_batch = int(np.floor(num_batch)) if drop_last else int(np.ceil(num_batch)) ind = np.arange(len(x_preprocessed)) # Start training @@ -202,12 +495,21 @@ def fit( # pylint: disable=W0221 # Shuffle the examples random.shuffle(ind) + epoch_acc = [] + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + # Train for one epoch - for m in range(num_batch): - i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) - i_batch = self.ablator.forward(i_batch) + for m in pbar: + i_batch = self.ablator.forward(np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]])) + + if transform is not None and self.mode == "ViT": # VIT specific + i_batch = transform(i_batch) - i_batch = torch.from_numpy(i_batch).to(self._device) + if isinstance(i_batch, np.ndarray): + i_batch = torch.from_numpy(i_batch).to(self._device) o_batch = torch.from_numpy(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).to(self._device) # Zero the parameter gradients @@ -215,7 +517,7 @@ def fit( # pylint: disable=W0221 # Perform prediction try: - model_outputs = self._model(i_batch) + model_outputs = self.model(i_batch) except ValueError as err: if "Expected more than 1 value per channel when training" in str(err): logger.exception( @@ -224,8 +526,8 @@ def fit( # pylint: disable=W0221 ) raise err - # Form the loss function - loss = self._loss(model_outputs[-1], o_batch) + loss = self.loss(model_outputs, o_batch) + acc = self.get_accuracy(preds=model_outputs, labels=o_batch) # Do training if self._use_amp: # pragma: no cover @@ -237,7 +539,214 @@ def fit( # pylint: disable=W0221 else: loss.backward() - self._optimizer.step() + self.optimizer.step() + + epoch_acc.append(acc) + epoch_loss.append(loss.cpu().detach().numpy()) + epoch_batch_sizes.append(len(i_batch)) + + if verbose: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) if scheduler is not None: scheduler.step() + + @staticmethod + def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + """ + Helper function to get the accuracy during training. + + :param preds: model predictions. + :param labels: ground truth labels (not one hot). + :return: prediction accuracy. + """ + if not isinstance(preds, np.ndarray): + preds = preds.detach().cpu().numpy() + + if not isinstance(labels, np.ndarray): + labels = labels.detach().cpu().numpy() + + return np.sum(np.argmax(preds, axis=1) == labels) / len(labels) + + def update_batchnorm(self, x: np.ndarray, batch_size: int, nb_epochs: int = 1) -> None: + """ + Method to update the batchnorm of a neural network on small datasets when it was pre-trained + + :param x: Training data. + :param batch_size: Size of batches. 
+ :param nb_epochs: How many times to forward pass over the input data + """ + import torch + + if self.mode != "ViT": + raise ValueError("Accessing a ViT specific functionality while running in CNN mode") + + self.model.train() + + ind = np.arange(len(x)) + num_batch = int(len(x) / float(batch_size)) + + with torch.no_grad(): + for _ in tqdm(range(nb_epochs)): + for m in tqdm(range(num_batch)): + i_batch = self.ablator.forward( + np.copy(x[ind[m * batch_size : (m + 1) * batch_size]]), column_pos=random.randint(0, x.shape[3]) + ) + _ = self.model(i_batch) + + def eval_and_certify( + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + verbose: bool = True, + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Evaluates the ViT's normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. + :param batch_size: batch size when evaluating. + :param verbose: If to display the progress bar + :return: The accuracy and certified accuracy over the dataset + """ + import torch + + self.model.eval() + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + # Check label shape + y_preprocessed = self.reduce_labels(y_preprocessed) + + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + pbar = tqdm(range(num_batch), disable=not verbose) + accuracy = torch.tensor(0.0).to(self._device) + cert_sum = torch.tensor(0.0).to(self._device) + n_samples = 0 + + with torch.no_grad(): + for m in pbar: + if m == (num_batch - 1): + i_batch = np.copy(x_preprocessed[m * batch_size :]) + o_batch = y_preprocessed[m * batch_size :] + else: + i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) + o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] + + pred_counts = np.zeros((len(i_batch), self.nb_classes)) + if self.ablation_type in {"column", "row"}: + for pos in range(i_batch.shape[-1]): + ablated_batch = self.ablator.forward(i_batch, column_pos=pos) + # Perform prediction + model_outputs = self.model(ablated_batch) + + if self.algorithm == "salman2021": + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: + if self.logits: + model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + model_outputs = model_outputs >= self.threshold + pred_counts += model_outputs.cpu().numpy() + + else: + for column_pos in range(i_batch.shape[-1]): + for row_pos in range(i_batch.shape[-2]): + ablated_batch = self.ablator.forward(i_batch, column_pos=column_pos, row_pos=row_pos) + model_outputs = self.model(ablated_batch) + + if self.algorithm == "salman2021": + pred_counts[np.arange(0, len(i_batch)), model_outputs.argmax(dim=-1).cpu()] += 1 + else: + if self.logits: + model_outputs = torch.nn.functional.softmax(model_outputs, dim=1) + model_outputs = model_outputs >= self.threshold + pred_counts += model_outputs.cpu().numpy() + + _, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=size_to_certify, label=o_batch + ) + cert_sum += torch.sum(cert_and_correct) + o_batch = torch.from_numpy(o_batch).to(self.device) + accuracy += torch.sum(top_predicted_class == o_batch) + n_samples += len(cert_and_correct) + + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert 
Acc {cert_sum / n_samples:.3f}") + + return (accuracy / n_samples), (cert_sum / n_samples) + + def _predict_classifier( + self, x: Union[np.ndarray, "torch.Tensor"], batch_size: int, training_mode: bool, **kwargs + ) -> np.ndarray: + import torch + + if isinstance(x, torch.Tensor): + x_numpy = x.cpu().numpy() + + outputs = PyTorchClassifier.predict( + self, x=x_numpy, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + + if self.algorithm == "levine2020": + if not self.logits: + return np.asarray((outputs >= self.threshold)) + return np.asarray( + (torch.nn.functional.softmax(torch.from_numpy(outputs), dim=1) >= self.threshold).type(torch.int) + ) + return outputs + + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: + """ + Performs cumulative predictions over every ablation location + + :param x: Unablated image + :param batch_size: the batch size for the prediction + :param training_mode: if to run the classifier in training mode + :return: cumulative predictions after sweeping over all the ablation configurations. + """ + if self._channels_first: + columns_in_data = x.shape[-1] + rows_in_data = x.shape[-2] + else: + columns_in_data = x.shape[-2] + rows_in_data = x.shape[-3] + + if self.ablation_type in {"column", "row"}: + if self.ablation_type == "column": + ablate_over_range = columns_in_data + else: + # image will be transposed, so loop over the number of rows + ablate_over_range = rows_in_data + + for ablation_start in range(ablate_over_range): + ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) + if ablation_start == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + elif self.ablation_type == "block": + for xcorner in range(rows_in_data): + for ycorner in range(columns_in_data): + ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) + if ycorner == 0 and xcorner == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + + return preds diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 504ddefda6..6cc958acb3 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -28,22 +28,21 @@ import numpy as np from tqdm import tqdm +from art.estimators.certification.derandomized_smoothing.derandomized import DeRandomizedSmoothingMixin from art.estimators.classification.tensorflow import TensorFlowV2Classifier -from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin from art.utils import check_and_transform_label_format if TYPE_CHECKING: # pylint: disable=C0412 import tensorflow as tf - - from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE + from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ABLATOR_TYPE from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor logger = logging.getLogger(__name__) -class TensorFlowV2DeRandomizedSmoothing(DeRandomizedSmoothingMixin, 
TensorFlowV2Classifier): +class TensorFlowV2DeRandomizedSmoothing(TensorFlowV2Classifier, DeRandomizedSmoothingMixin): """ Implementation of (De)Randomized Smoothing applied to classifier predictions as introduced in Levine et al. (2020). @@ -106,6 +105,8 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ + # input channels are internally doubled for the certification algorithm. + input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2) super().__init__( model=model, nb_classes=nb_classes, @@ -118,12 +119,31 @@ def __init__( preprocessing_defences=preprocessing_defences, postprocessing_defences=postprocessing_defences, preprocessing=preprocessing, - ablation_type=ablation_type, - ablation_size=ablation_size, - threshold=threshold, - logits=logits, ) + self.ablation_type = ablation_type + self.logits = logits + self.threshold = threshold + self._channels_first = channels_first + + from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ( + ColumnAblator, + BlockAblator, + ) + + if TYPE_CHECKING: + self.ablator: ABLATOR_TYPE # pylint: disable=used-before-assignment + + if self.ablation_type in {"column", "row"}: + row_ablation_mode = self.ablation_type == "row" + self.ablator = ColumnAblator( + ablation_size=ablation_size, channels_first=self._channels_first, row_ablation_mode=row_ablation_mode + ) + elif self.ablation_type == "block": + self.ablator = BlockAblator(ablation_size=ablation_size, channels_first=self._channels_first) + else: + raise ValueError("Ablation type not supported. Must be either column or block") + def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: import tensorflow as tf @@ -134,10 +154,9 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo outputs = tf.nn.softmax(outputs) return np.asarray(outputs >= self.threshold).astype(int) - def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epochs: int, **kwargs) -> None: - return TensorFlowV2Classifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - - def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None: + def fit( # pylint: disable=W0221 + self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = True, **kwargs + ) -> None: """ Fit the classifier on the training set `(x, y)`. @@ -146,6 +165,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. + :param verbose: if to display training progress bars :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. 
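For reference, the `scheduler` passed through `kwargs` above is simply called as `scheduler(epoch)` at the end of every epoch of `fit()` (see the training loop in the next hunk). A minimal sketch of how it might be used, assuming an already constructed `TensorFlowV2DeRandomizedSmoothing` instance named `classifier`, a standard `tf.keras` optimizer, and CIFAR-10-shaped `x_train`/`y_train` arrays (all names here are illustrative, not part of this PR):

```python
def halve_lr_every_10_epochs(epoch: int) -> None:
    # Called by fit() at the end of each epoch; here we decay the learning rate.
    # Assumes the Keras optimizer allows its learning_rate to be reassigned.
    if epoch > 0 and epoch % 10 == 0:
        classifier.optimizer.learning_rate = float(classifier.optimizer.learning_rate) * 0.5


classifier.fit(x_train, y_train, batch_size=128, nb_epochs=30, scheduler=halve_lr_every_10_epochs)
```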
@@ -171,6 +191,7 @@ def train_step(model, images, labels): loss = self.loss_object(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) self.optimizer.apply_gradients(zip(gradients, model.trainable_variables)) + return loss, predictions else: train_step = self._train_step @@ -186,27 +207,137 @@ def train_step(model, images, labels): if self._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) - for epoch in tqdm(range(nb_epochs)): + for epoch in tqdm(range(nb_epochs), desc="Epochs"): num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + + epoch_acc = [] + epoch_loss = [] + epoch_batch_sizes = [] + + pbar = tqdm(range(num_batch), disable=not verbose) + ind = np.arange(len(x_preprocessed)) - for m in range(num_batch): + for m in pbar: i_batch = np.copy(x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]) labels = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]] images = self.ablator.forward(i_batch) - train_step(self.model, images, labels) + + if self._train_step is None: + loss, predictions = train_step(self.model, images, labels) + acc = np.sum(np.argmax(predictions.numpy(), axis=1) == np.argmax(labels, axis=1)) / len(labels) + epoch_acc.append(acc) + epoch_loss.append(loss.numpy()) + epoch_batch_sizes.append(len(i_batch)) + else: + train_step(self.model, images, labels) + + if verbose: + if self._train_step is None: + pbar.set_description( + f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " + f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " + ) + else: + pbar.set_description("Batches") if scheduler is not None: scheduler(epoch) - def predict( - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: # type: ignore + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ - Perform prediction of the given classifier for a batch of inputs + Performs cumulative predictions over every ablation location - :param x: Input samples. - :param batch_size: Batch size. + :param x: Unablated image + :param batch_size: the batch size for the prediction :param training_mode: if to run the classifier in training mode - :return: Array of predictions of shape `(nb_inputs, nb_classes)`. + :return: cumulative predictions after sweeping over all the ablation configurations. 
+ """ + if self._channels_first: + columns_in_data = x.shape[-1] + rows_in_data = x.shape[-2] + else: + columns_in_data = x.shape[-2] + rows_in_data = x.shape[-3] + + if self.ablation_type in {"column", "row"}: + if self.ablation_type == "column": + ablate_over_range = columns_in_data + else: + # image will be transposed, so loop over the number of rows + ablate_over_range = rows_in_data + + for ablation_start in range(ablate_over_range): + ablated_x = self.ablator.forward(np.copy(x), column_pos=ablation_start) + if ablation_start == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + elif self.ablation_type == "block": + for xcorner in range(rows_in_data): + for ycorner in range(columns_in_data): + ablated_x = self.ablator.forward(np.copy(x), row_pos=xcorner, column_pos=ycorner) + if ycorner == 0 and xcorner == 0: + preds = self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + else: + preds += self._predict_classifier( + ablated_x, batch_size=batch_size, training_mode=training_mode, **kwargs + ) + return preds + + def eval_and_certify( + self, + x: np.ndarray, + y: np.ndarray, + size_to_certify: int, + batch_size: int = 128, + verbose: bool = True, + ) -> Tuple["tf.Tensor", "tf.Tensor"]: + """ + Evaluates the normal and certified performance over the supplied data. + + :param x: Evaluation data. + :param y: Evaluation labels. + :param size_to_certify: The size of the patch to certify against. + If not provided will default to the ablation size. + :param batch_size: batch size when evaluating. + :param verbose: If to display the progress bar + :return: The accuracy and certified accuracy over the dataset """ - return DeRandomizedSmoothingMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) + import tensorflow as tf + + y = check_and_transform_label_format(y, nb_classes=self.nb_classes) + + # Apply preprocessing + x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) + + num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) + pbar = tqdm(range(num_batch), disable=not verbose) + accuracy = tf.constant(np.array(0.0), dtype=tf.dtypes.int32) + cert_sum = tf.constant(np.array(0.0), dtype=tf.dtypes.int32) + n_samples = 0 + + for m in pbar: + if m == (num_batch - 1): + i_batch = np.copy(x_preprocessed[m * batch_size :]) + o_batch = y_preprocessed[m * batch_size :] + else: + i_batch = np.copy(x_preprocessed[m * batch_size : (m + 1) * batch_size]) + o_batch = y_preprocessed[m * batch_size : (m + 1) * batch_size] + + pred_counts = self.predict(i_batch) + + _, cert_and_correct, top_predicted_class = self.ablator.certify( + pred_counts, size_to_certify=size_to_certify, label=o_batch + ) + cert_sum += tf.math.reduce_sum(tf.where(cert_and_correct, 1, 0)) + accuracy += tf.math.reduce_sum(tf.where(top_predicted_class == np.argmax(o_batch, axis=-1), 1, 0)) + n_samples += len(cert_and_correct) + + pbar.set_description(f"Normal Acc {accuracy / n_samples:.3f} " f"Cert Acc {cert_sum / n_samples:.3f}") + return (accuracy / n_samples), (cert_sum / n_samples) diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/__init__.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py new file mode 100644 index 0000000000..48f96eefab --- /dev/null +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -0,0 +1,196 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# PatchEmbed class adapted from the implementation in https://github.com/MadryLab/smoothed-vit +# +# Original License: +# +# MIT License +# +# Copyright (c) 2021 Madry Lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE + +""" +Implements functionality for running Vision Transformers in ART +""" +from typing import Optional + +import torch +from timm.models.vision_transformer import VisionTransformer + + +class PatchEmbed(torch.nn.Module): + """ + Image to Patch Embedding + + Class adapted from the implementation in https://github.com/MadryLab/smoothed-vit + + Original License stated above. + """ + + def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = 768): + """ + Specifies the configuration for the convolutional layer. + + :param patch_size: The patch size used by the ViT. + :param in_channels: Number of input channels. + :param embed_dim: The embedding dimension used by the ViT. 
+ """ + super().__init__() + self.patch_size = patch_size + self.in_channels = in_channels + self.embed_dim = embed_dim + self.proj: Optional[torch.nn.Conv2d] = None + + def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: # pylint: disable=W0613 + """ + Creates a convolution that mimics the embedding layer to be used for the ablation mask to + track where the image was ablated. + + :param patch_size: The patch size used by the ViT. + :param embed_dim: The embedding dimension used by the ViT. + :param device: Which device to set the emdedding layer to. + :param kwargs: Handles the remaining kwargs from the ViT configuration. + """ + + if patch_size is not None: + self.patch_size = patch_size + if embed_dim is not None: + self.embed_dim = embed_dim + + self.proj = torch.nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + w_shape = self.proj.weight.shape + self.proj.weight = torch.nn.Parameter(torch.ones(w_shape).to(device)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass through the embedder. We are simply tracking the positions of the ablation mask so no gradients + are required. + + :param x: Input data corresponding to the ablation mask + :return: The embedded input + """ + if self.proj is not None: + with torch.no_grad(): + x = self.proj(x).flatten(2).transpose(1, 2) + return x + raise ValueError("Projection layer not yet created.") + + +class PyTorchVisionTransformer(VisionTransformer): + """ + Model-specific class to define the forward pass of the Vision Transformer (ViT) in PyTorch. + """ + + # Make as a class attribute to avoid being included in the + # state dictionaries of the ViT Model. + ablation_mask_embedder = PatchEmbed(in_channels=1) + + def __init__(self, **kwargs): + """ + Create a PyTorchVisionTransformer instance + + :param kwargs: keyword arguments required to create the mask embedder and the vision transformer class + """ + self.to_drop_tokens = kwargs["drop_tokens"] + + if kwargs["device_type"] == "cpu" or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: # pragma: no cover + cuda_idx = torch.cuda.current_device() + self.device = torch.device(f"cuda:{cuda_idx}") + + del kwargs["drop_tokens"] + del kwargs["device_type"] + + super().__init__(**kwargs) + self.ablation_mask_embedder.create(device=self.device, **kwargs) + + self.in_chans = kwargs["in_chans"] + self.img_size = kwargs["img_size"] + + @staticmethod + def drop_tokens(x: torch.Tensor, indexes: torch.Tensor) -> torch.Tensor: + """ + Drops the tokens which correspond to fully masked inputs + + :param x: Input data + :param indexes: positions to be ablated + :return: Input with tokens dropped where the input was fully ablated. + """ + x_no_cl, cls_token = x[:, 1:], x[:, 0:1] + shape = x_no_cl.shape + + # reshape to temporarily remove batch + x_no_cl = torch.reshape(x_no_cl, shape=(-1, shape[-1])) + indexes = torch.reshape(indexes, shape=(-1,)) + indexes = indexes.nonzero(as_tuple=True)[0] + x_no_cl = torch.index_select(x_no_cl, dim=0, index=indexes) + x_no_cl = torch.reshape(x_no_cl, shape=(shape[0], -1, shape[-1])) + return torch.cat((cls_token, x_no_cl), dim=1) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + """ + The forward pass of the ViT. + + :param x: Input data. 
+ :return: The input processed by the ViT backbone + """ + + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True + + if ablated_input: + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] + + x = self.patch_embed(x) + x = self._pos_embed(x) + + if self.to_drop_tokens and ablated_input: + ones = self.ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + x = self.drop_tokens(x, indexes) + + x = self.norm_pre(x) + x = self.blocks(x) + return self.norm(x) diff --git a/notebooks/README.md b/notebooks/README.md index 7ab184e397..95806cbf65 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -296,6 +296,9 @@ demonstrates using interval bound propagation for certification of neural networ

+[smoothed_vision_transformers.ipynb](smoothed_vision_transformers.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/smoothed_vision_transformers.ipynb)]
+Demonstrates training a neural network using smoothed vision transformers for certified performance against patch attacks.
+
## MNIST
[fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb](fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb)]
diff --git a/notebooks/smoothed_vision_transformers.ipynb b/notebooks/smoothed_vision_transformers.ipynb
new file mode 100644
index 0000000000..325cecf976
--- /dev/null
+++ b/notebooks/smoothed_vision_transformers.ipynb
@@ -0,0 +1,1220 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "58063edd",
+ "metadata": {},
+ "source": [
+ "# Certification of Vision Transformers"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0438abb9",
+ "metadata": {},
+ "source": [
+ "In this notebook we will go over how to use the PyTorchSmoothedViT tool to certify vision transformers against patch attacks!\n",
+ "\n",
+ "### Overview\n",
+ "\n",
+ "This method was introduced in Certified Patch Robustness via Smoothed Vision Transformers (https://arxiv.org/abs/2110.07719). The core technique is one of *image ablations*, where the image is blanked out except for certain regions. By ablating the input in different ways we can obtain many predictions for a single input. Now, as we are ablating large parts of the image, the attacker's patch attack is also removed in many of the predictions. Based on factors like the size of the adversarial patch and the size of the retained part of the image, the attacker will only be able to influence a limited number of predictions. In fact, if the attacker has an $m \\times m$ patch attack and the retained part of the image is a column of width $s$, then the maximum number of predictions $\\Delta$ that could be affected is: \n",
+ "

$\\Delta = m + s - 1$
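\n",
+ "\n",
+ "Purely as an illustration (the numbers are not from the paper): a $5 \\times 5$ patch ($m = 5$) with a retained column of width $s = 4$ can affect at most $\\Delta = 5 + 4 - 1 = 8$ of the ablated predictions.\n",
+ "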

\n",
+ "Based on this relationship we can derive a simple but effective criterion: suppose that over the many ablated predictions for an image the most frequently predicted class $c_t$ was predicted $k_t$ times and the second most predicted class $c_{t-1}$ was predicted $k_{t-1}$ times. Then the prediction of $c_t$ is certified if: \n",
+ "\n",
+ "

$k_t - k_{t-1} > 2\\Delta$
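\n",
+ "\n",
+ "Continuing the illustrative numbers from above: with $\\Delta = 8$, the most frequently predicted class must beat the runner-up by more than $2\\Delta = 16$ ablated predictions for the certificate to hold.\n",
+ "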

\n", + "\n", + "Intuitivly we are saying that even if $k$ predictions were adversarially influenced and those predictions were to change, then the model will *still* have predicted class $c_t$.\n", + "\n", + "### What's special about Vision Transformers?\n", + "\n", + "The formulation above is very generic and it can be applied to any nerual network model, in fact the original paper which proposed it (https://arxiv.org/abs/2110.07719) considered the case with convolutional nerual networks. \n", + "\n", + "However, Vision Transformers (ViTs) are well siuted to this task of predicting with vision ablations for two key reasons: \n", + "\n", + "+ ViTs first tokenize the input into a series of image regions which get embedded and then processed through the neural network. Thus, by considering the input as a set of tokens we can drop tokens which correspond to fully masked (i.e ablated)regions significantly saving on the compute costs. \n", + "\n", + "+ Secondly, the ViT's self attention layer enables sharing of information globally at every layer. In contrast convolutional neural networks build up the receptive field over a series of layers. Hence, ViTs can be more effective at classifying an image based on its small unablated regions.\n", + "\n", + "Let's see how to use these tools!" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "aeb27667", + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import numpy as np\n", + "import torch\n", + "\n", + "sys.path.append(\"..\")\n", + "from torchvision import datasets\n", + "from matplotlib import pyplot as plt\n", + "\n", + "# The core tool is PyTorchSmoothedViT which can be imported as follows:\n", + "from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing\n", + "\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "80541a3a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + } + ], + "source": [ + "# Function to fetch the cifar-10 data\n", + "def get_cifar_data():\n", + " \"\"\"\n", + " Get CIFAR-10 data.\n", + " :return: cifar train/test data.\n", + " \"\"\"\n", + " train_set = datasets.CIFAR10('./data', train=True, download=True)\n", + " test_set = datasets.CIFAR10('./data', train=False, download=True)\n", + "\n", + " x_train = train_set.data.astype(np.float32)\n", + " y_train = np.asarray(train_set.targets)\n", + "\n", + " x_test = test_set.data.astype(np.float32)\n", + " y_test = np.asarray(test_set.targets)\n", + "\n", + " x_train = np.moveaxis(x_train, [3], [1])\n", + " x_test = np.moveaxis(x_test, [3], [1])\n", + "\n", + " x_train = x_train / 255.0\n", + " x_test = x_test / 255.0\n", + "\n", + " return (x_train, y_train), (x_test, y_test)\n", + "\n", + "\n", + "(x_train, y_train), (x_test, y_test) = get_cifar_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2ac0c5b3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['vit_base_patch8_224',\n", + " 'vit_base_patch16_18x2_224',\n", + " 'vit_base_patch16_224',\n", + " 'vit_base_patch16_224_miil',\n", + " 'vit_base_patch16_384',\n", + " 'vit_base_patch16_clip_224',\n", + " 'vit_base_patch16_clip_384',\n", + " 'vit_base_patch16_gap_224',\n", + " 'vit_base_patch16_plus_240',\n", + " 'vit_base_patch16_rpn_224',\n", + " 
'vit_base_patch16_xp_224',\n", + " 'vit_base_patch32_224',\n", + " 'vit_base_patch32_384',\n", + " 'vit_base_patch32_clip_224',\n", + " 'vit_base_patch32_clip_384',\n", + " 'vit_base_patch32_clip_448',\n", + " 'vit_base_patch32_plus_256',\n", + " 'vit_giant_patch14_224',\n", + " 'vit_giant_patch14_clip_224',\n", + " 'vit_gigantic_patch14_224',\n", + " 'vit_gigantic_patch14_clip_224',\n", + " 'vit_huge_patch14_224',\n", + " 'vit_huge_patch14_clip_224',\n", + " 'vit_huge_patch14_clip_336',\n", + " 'vit_huge_patch14_xp_224',\n", + " 'vit_large_patch14_224',\n", + " 'vit_large_patch14_clip_224',\n", + " 'vit_large_patch14_clip_336',\n", + " 'vit_large_patch14_xp_224',\n", + " 'vit_large_patch16_224',\n", + " 'vit_large_patch16_384',\n", + " 'vit_large_patch32_224',\n", + " 'vit_large_patch32_384',\n", + " 'vit_medium_patch16_gap_240',\n", + " 'vit_medium_patch16_gap_256',\n", + " 'vit_medium_patch16_gap_384',\n", + " 'vit_small_patch16_18x2_224',\n", + " 'vit_small_patch16_36x1_224',\n", + " 'vit_small_patch16_224',\n", + " 'vit_small_patch16_384',\n", + " 'vit_small_patch32_224',\n", + " 'vit_small_patch32_384',\n", + " 'vit_tiny_patch16_224',\n", + " 'vit_tiny_patch16_384']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# There are a few ways we can interface with PyTorchSmoothedViT. \n", + "# The most direct way to get setup is by specifying the name of a supported transformer.\n", + "# Behind the scenes we are using the timm library (link: https://github.com/huggingface/pytorch-image-models).\n", + "\n", + "\n", + "# We currently support ViTs generated via: \n", + "# https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py\n", + "# Support for other architectures can be added in. Consider raising a feature or pull request to have \n", + "# additional models supported.\n", + "\n", + "# We can see all the models supported by using the .get_models() method:\n", + "PyTorchDeRandomizedSmoothing.get_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e8bac618", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: salman2021\n", + "INFO:root:Converting Adam Optimiser\n", + "WARNING:art.estimators.certification.derandomized_smoothing.pytorch: ViT expects input shape of: (3, 224, 224) but (3, 32, 32) specified as the input shape. 
The input will be rescaled to (3, 224, 224)\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:PyTorchViT(\n", + " (patch_embed): PatchEmbed(\n", + " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", + " (norm): Identity()\n", + " )\n", + " (pos_drop): Dropout(p=0.0, inplace=False)\n", + " (patch_drop): Identity()\n", + " (norm_pre): Identity()\n", + " (blocks): Sequential(\n", + " (0): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (1): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (2): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (3): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, 
bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (4): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (5): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (6): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, 
inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (7): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (8): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (9): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (10): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): 
Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (11): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " )\n", + " (norm): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (fc_norm): Identity()\n", + " (head_drop): Dropout(p=0.0, inplace=False)\n", + " (head): Linear(in_features=384, out_features=10, bias=True)\n", + ")\n" + ] + } + ], + "source": [ + "import timm\n", + "\n", + "# We can setup the PyTorchSmoothedViT if we start with a ViT model directly.\n", + "\n", + "vit_model = timm.create_model('vit_small_patch16_224')\n", + "optimizer = torch.optim.Adam(vit_model.parameters(), lr=1e-4)\n", + "\n", + "art_model = PyTorchDeRandomizedSmoothing(model=vit_model, # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=optimizer, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "353ef5a6", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: salman2021\n", + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). Loading weights using safetensors.\n", + "WARNING:art.estimators.certification.derandomized_smoothing.pytorch: ViT expects input shape of: (3, 224, 224) but (3, 32, 32) specified as the input shape. 
The input will be rescaled to (3, 224, 224)\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:PyTorchViT(\n", + " (patch_embed): PatchEmbed(\n", + " (proj): Conv2d(3, 384, kernel_size=(16, 16), stride=(16, 16))\n", + " (norm): Identity()\n", + " )\n", + " (pos_drop): Dropout(p=0.0, inplace=False)\n", + " (patch_drop): Identity()\n", + " (norm_pre): Identity()\n", + " (blocks): Sequential(\n", + " (0): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (1): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (2): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (3): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, 
bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (4): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (5): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (6): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, 
inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (7): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (8): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (9): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (10): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): 
Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " (11): Block(\n", + " (norm1): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (attn): Attention(\n", + " (qkv): Linear(in_features=384, out_features=1152, bias=True)\n", + " (q_norm): Identity()\n", + " (k_norm): Identity()\n", + " (attn_drop): Dropout(p=0.0, inplace=False)\n", + " (proj): Linear(in_features=384, out_features=384, bias=True)\n", + " (proj_drop): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls1): Identity()\n", + " (drop_path1): Identity()\n", + " (norm2): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (mlp): Mlp(\n", + " (fc1): Linear(in_features=384, out_features=1536, bias=True)\n", + " (act): GELU(approximate='none')\n", + " (drop1): Dropout(p=0.0, inplace=False)\n", + " (norm): Identity()\n", + " (fc2): Linear(in_features=1536, out_features=384, bias=True)\n", + " (drop2): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ls2): Identity()\n", + " (drop_path2): Identity()\n", + " )\n", + " )\n", + " (norm): LayerNorm((384,), eps=1e-06, elementwise_affine=True)\n", + " (fc_norm): Identity()\n", + " (head_drop): Dropout(p=0.0, inplace=False)\n", + " (head): Linear(in_features=384, out_features=10, bias=True)\n", + ")\n" + ] + } + ], + "source": [ + "# Or we can just feed in the model name and ART will internally create the ViT.\n", + "\n", + "art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', # Name of the model architecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=torch.optim.SGD, # the optimizer to use: note, it is not initialised here, we just supply the class\n", + " optimizer_params={\"lr\": 0.01}, # the parameters used to initialise the optimizer\n", + " input_shape=(3, 32, 32), # the input shape of the data: note that if this differs from what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine-tune on new data\n", + " load_pretrained=True) # whether to load pre-trained weights for the ViT" + ] + }, + { + "cell_type": "markdown", + "id": "c7a4255f", + "metadata": {}, + "source": [ + "Creating a PyTorchDeRandomizedSmoothing instance with the above code follows many of the general ART patterns, with two caveats:\n", + "+ The optimizer would normally be supplied to the estimator already initialised, together with a PyTorch model. However, here we have not yet created the model, we are only supplying the model architecture name. Hence, we pass the optimizer class into PyTorchDeRandomizedSmoothing along with the keyword arguments in optimizer_params that would normally be used to initialise it (see the sketch below).\n", + "+ The input shape primarily determines whether the input requires upsampling. The ViT loaded here expects images of 224 x 224 resolution, so in our case of using CIFAR data the inputs will be upsampled.\n",
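+ "\n", + "As a rough sketch (added here for illustration only and not executed in this notebook), supplying the optimizer class plus optimizer_params is conceptually equivalent to ART building the optimizer itself once it has created the timm ViT internally:\n", + "\n", + "```python\n", + "# hypothetical illustration: vit_model stands in for the timm ViT that\n", + "# PyTorchDeRandomizedSmoothing constructs internally from the supplied model name\n", + "optimizer = torch.optim.SGD(vit_model.parameters(), lr=0.01)\n", + "```"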
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "44975815", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAESCAYAAABdK7eSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABCAklEQVR4nO3de3QV9bk38O/Mvue2cyM3AsgdVECLGqPW10oOIXZ5tPCHeFg9qCxZ5QTWUmxtc5bFS9uVql2t1UNxnXMs4FpSWs8SfbUtFlHCsQaqqbxeUCqUCkgSLiG3nX2d+b1/xGwJmWcgkOwb389ao2Q/e/b+zczekycz88yjKaUUiIiIiBJET/YAiIiI6OLC5IOIiIgSiskHERERJRSTDyIiIkooJh9ERESUUEw+iIiIKKGYfBAREVFCMfkgIiKihGLyQURERAnF5IOIiIgSiskHEaWMtWvX4pJLLoHX60VVVRX+8pe/JHtIRDQKmHwQUUr47W9/i9WrV+Phhx/GX//6V8yZMwe1tbU4duxYsodGRCNMY2M5IkoFVVVVuPrqq/Ef//EfAADTNDFu3DisWrUKP/jBD5I8OiIaSc7ReuG1a9fiySefRFtbG+bMmYNnnnkG11xzzVnnM00TR48eRW5uLjRNG63hEZENpRR6enpQUVEBXR/9A6SRSAQtLS1oaGiIP6brOmpqatDc3Gw5TzgcRjgcjv9smiY6OjpQVFTEfQdREgxrv6FGwebNm5Xb7Va//vWv1ccff6zuvfdelZ+fr9rb28867+HDhxUATpw4pcB0+PDh0dhFDPHFF18oAOqdd94Z9Pj3vvc9dc0111jO8/DDDyd9/XDixGnodC77jVE58vHzn/8c9957L+6++24AwLPPPovf//73+PWvf33Ww6e5ubkAgLnXVMHpHDq8rq5T4rwe3RRjBW4lxioLssRYcaEcK/JnizG37hJjDo9PjPU/wSGGTnV2ibFoTF7GfL9fjOlGVIyFI2ExFgrJMa/PI8YMGGIsGAyIsTx/rhiDkl8zEpGXz2Fz8M9hsx1ysnPksQDIzpI/N06XV4yFwhExpjSbvyR0eTkiEfk1Y8r6CEEoHMEPn34h/n1MRQ0NDVi9enX8566uLowfPz6JIzp3//JPV2LmJWP6d9VnbAKH5oBD2NaawwndKexbNA3Q9P7XG9gVfPnvD/cfwcbX/jxCoyeydy77jRFPPoZ7+PTMQ6c9PT39A3M6LZMPu18IDl0+1Op0yL+Y3S75NT0ueRV53XKC4XbIMadHjgEAHPJ7Bm3eU9flZfTavKcu/96GBjmhgynPaLduDJvrnE3DZn3brTclv6YOeb04YJMk2nzWfGfZhj6vW4y5XHLM7mzB+SYfDpvXlJKPr8aTmNMXxcXFcDgcaG9vH/R4e3s7ysrKLOfxeDzweOQkN5W5XQ7xO+LQHHAIh6z1syYfX35mBxIQbeD9Ru0MO9EQ57LfGPGTuSdOnIBhGCgtLR30eGlpKdra2oY8v7GxEX6/Pz6NGzdupIdERCnO7XZj7ty52L59e/wx0zSxfft2VFdXJ3FkaUbDV0dSeNkLpbCkl9o2NDSgq6srPh0+fDjZQyKiJFi9ejX+67/+Cxs3bsQnn3yCFStWIBAIxE/fkgX5oB5RShvxY3HDPXyazodOiWjk3HHHHTh+/DjWrFmDtrY2XHHFFdi6deuQo6h0Gh7doDQ14snH6YdPb7/9dgBfHT5duXLlOb/Op59+As3ivGfniRPiPIXydXzQiuRgsSFfHKP5SsRYwOwQY72G/CeJ0uRz/gDQF5IvEOwLyhd5Rg35+owTNif+vU55rLGY/JoOm+sM7BLKvpB8UWnMlJddCxWJMV2+PAPRsLzOfE75c9Frc/FnhxGT3xBAVpZ8MbJmczGyZnOtEGxK1/pC8kW1sajNBbdO6+0Ujtov32hZuXLlsPYT6SrcexyBDuvPpTKVeERDwfHVdR1niBkGIlHr67COHe0+r3ESjZZRuQpp9erVWLp0Ka666ipcc801eOqpp3j4lIhoFNndLlLx/AylmFFJPnj4lIiIiCSjVn91sRw+JSIiouFh8TcRUYqzOmkycBsPnlChdMTkg4goxQ1cLq4sHmPBC6WjpN/ng4iIRg6PhFA6SNkjH16nBt3qduk2twSZYFNOe0mp3NukZEyhGPPZlUza3EI2GA6JsVBULv0EAGXzum6fTV8Ym94uypTf02/TvyYWtbstvTwWw+aW7Q63vBHDEXm9RWPyesmyeU1ntjxOr818MU0uCdaVzW3nAcRs/h61u915Tra8LXoDfWIsGpPLaW26DqCn27pXkFSySSND0z3QHNafy5gRhRGzXv8xw0TMtC6DVoB4f377TytR4qVs8kFElMnkP15Ov0f6YIppBGUInnYhIkoqniihiw+TDyKipOIlo3TxYfJBRERECcXkg4goqXjahS4+TD6IiJKKtwqji0/KVrt4NQO6NvTK7txcecjTxhaIsSKf3PbUZcrlnb0dcmdTw5Rzt2Cf3BVUt29qi7z8HDHmtCkN7ezqkeez2dKFuXJ5Z0+3XG4aselOG7TpsqpsznHnZMulzdFIUIzphryALpsOu4Yhj9NpUxMbDsvzAYDbJW9kXSiVBIBw7yn5RW06JXtsuvrGTLlCoitgXYIdselmTBeur6cXXUKD7kjEgCGVzTsATbeOudwueH3WnzuXXX03URKkbPJBRJSplFJQQhtaZcoxzfYAiRpSvjtwC3abWwcRJQVPuxARZSjmHJSqmHwQESXBaFzlwStHKF0w+SAiSoLROCrBIx2ULph8EBGlmvM8hKHOf1aihGLyQUSURJbJwnkewuCRD0oXKVvtku9xwKEPzY18NmWTfpvupWPyXGLMMOUOnna9PR1Om/pGi7EPCJv2ZZpOm7pYp003VSMsl6IqhzyeY8c65de06W7a0yd3We0z5BLlHF+eGENYfj+HTVMt3aYMwOGRux0HA3KZdZZLHqdTqEYYEArJyx+MyqW2ps3frZ298lg7++TPVK9N2Xcoav25iBkstR1NTocOl8t63Wu6Bmn1G8qAKXwPdKcOh8N6n6Tb7I+IkiFlkw8iooylyV1tNU3+28U0NSho8duSDX4FzfI11cAbEqUQpsNEREk2nOs0tDP+f67PJ0o
lTD6IiJKMCQJdbJh8EBFlKFa+UKpi8kFElMGYgFAq4gWnREQZiqdzKFWNePLxyCOP4NFHHx302PTp0/Hpp58O63WK/V44LcpDc11yeavXK8d0h5z/+3xyiW40Jpd+mjZfbaXkUsuI1LHyS0ZELps0lU23WJvyVuWUu6z2ROTutIYhr9M+m3JMu1LNnoC8DF90yGNx6fJr5vXK2yLaJrQPBRDsksuFxxdPEWMlJZViDAC03C4xFj51Uoz19srL39Ujl9qe6JLLrP9xWB6L4bDeBZhnKSWmC+Ny6PA4hVJbGGInYlMpGEJ3Y2WYMIQybmXY3TSAKPFG5cjHZZddhjfeeOOrN7Hr505EdJHpL6e1KbUVcun+QluBUv2TZYjJJKWWUckKnE4nysrKRuOliYiIKM2NygWnn332GSoqKjBp0iQsWbIEhw4dGo23ISIiojQ04kc+qqqqsGHDBkyfPh2tra149NFH8fWvfx0fffQRcnNzhzw/HA4jHA7Hf+7u7h7pIRERZZyhdzglSh8jnnzU1dXF/z179mxUVVVhwoQJ+N3vfodly5YNeX5jY+OQC1SJiC52Z0sumHhQOhv1+3zk5+dj2rRp2L9/v2W8oaEBXV1d8enw4cOjPSQiIiJKolEvQ+nt7cWBAwfw7W9/2zLu8XjgsehUW1acBbdF19g8t9yhMydLLifVbEpU7W7Do9l0kQ0H5TJN3ebvkqJcv81YgOxsuQtrd5dcNurPk7uw9oTk5f/8C/k1e8Nyqa3bpvHp2Cybzrwum7LQk51iLKzksbhsutr684ae7htw3aVXibHuVrk8UfXZVw/4i+UuyuE+ed309sp/D3hc8muOK5OXsaSkVIy1d1uX78YME4c+OiLORxfGiEYQDVvvP5QJaMq6qsXjdMHrtv4cuJwOeDzWMWkeomQZ8eTju9/9Lm699VZMmDABR48excMPPwyHw4E777xzpN+KiCgtKdOAaXPvDe3L1OPMUy8OTYPudFomJk6nAy7htgYOqU0uUZKMePJx5MgR3HnnnTh58iTGjBmDG264Abt27cKYMWNG+q2IiDJUf9phlWTwWg/KBCOefGzevHmkX5KI6CLzVYrRn4awtoUyC4/FERGlMO20/xJlCiYfRDTqHnnkEWiaNmiaMWNGPB4KhVBfX4+ioiLk5ORg0aJFaG9vT+KIiWg0MfkgooS47LLL0NraGp/efvvteOz+++/Hq6++ihdffBFNTU04evQoFi5cmMTREtFoStmObwU5PngsOtg6I53iPB6XvDhZniwxFg7KZahRUy7tzc8vEGN2jZwihn3OF43K3UuzcnLE2NHjYTF24HO5s+nxHnkZ++QQJvjk0tfbv36FGKssl5fhf1r+Lsaa97eJsZgpd/R16vK26Ok8Lsb6euX1mZt7ltJFQz5M7vXK87ptOjNnafJ8MUPeUOPHVYix3I4ey8cjUQM7R7jUVur51NXVheeeew6bNm3CzTffDABYv349Zs6ciV27duHaa68d0XGkArfHC59P3ifJn1gHlCZ9thRiMet9mWmyqy2lFh75IKKEkHo+tbS0IBqNoqamJv7cGTNmYPz48Whubk7WcEeVy+mM3+PozMntdsMjTA6nE7quWU6AglKGMNnclIcoCVL2yAcRZQ67nk9tbW1wu93Iz88fNE9paSna2uSjXewLRZS+mHwQ0aiz6/nk8/nO6zXZF4ooffG0CxEl3Ok9n8rKyhCJRNDZ2TnoOe3t7ZbXiAxgXyii9MXkg4gSbqDnU3l5OebOnQuXy4Xt27fH4/v27cOhQ4dQXV0tvobH40FeXt6giYjSA0+7ENGos+v55Pf7sWzZMqxevRqFhYXIy8vDqlWrUF1dnZGVLkSUwsnHmIJCeN1DhxfskMtQdc2mW2ifXE4bjMhlik5NLn3si8rla3aHlIJRuSwUAPIL5L/gIoZchPf3I0fFWEe3TYdWp9wN2OGQlyTPK79midO6hBMAvB1yCevUPPkwe2uhPJb2zmNiLNwnr+/3//Y3MabH5AqBaPZZ/sr2y51kocufU79fLr/MNeVtH4rIn28VkS/EvGRMtvB6NjXW5+FsPZ9+8YtfQNd1LFq0COFwGLW1tfjVr341omNIJTFTIWJYf740ABBK9WNm/7xWnC4HnEJjOc0h78eIkiFlkw8iyhxn6/nk9Xqxdu1arF27NkEjSi5DKTGJ0CDfTD1mGuJ8OnToTofl3Dq72lKK4SeSiChjsAcMpQcmH0RERJRQTD6IiIgooZh8EBEljXwRMVEmY/JBRJQ0vEaDLk6sdiEiSjAFDUpKPDRN7Fyr6w44lRBzOmHAuvOxyV09pZiU/UTmFxXD5xn6RSrIkftA6Lrccryz+5QYiwZ65dc05HtZmJDvA6Fc8qrNyfGKMQCIQo5/8nf5vhSBcECMeb0eOWZxP5UBvmz5vhMFDvleEC3728VYLCK/X9gv3+djTIG8XjTI992IxuR7w/RFgmIs0CcfEo/E7O+Dodndy8Xmj12XLgeVLt+rwSXc3wEAYmH5vipKuG+M9DiNDAUdSriHkHLo0M4ojR3YGk6HE26H9bZWcMAUkw/5Xj5EyZCyyQcREfXjyRnKNLzmg4gorfEoFaUfJh9ERGmNx0Uo/TD5ICIiooRi8kFEREQJNezkY+fOnbj11ltRUVEBTdPw8ssvD4orpbBmzRqUl5fD5/OhpqYGn3322UiNl4go7TmcTjhdbsvJ4XRBdziFyQXd6bacoDkRi8FyMuXCPKKkGHa1SyAQwJw5c3DPPfdg4cKFQ+JPPPEEnn76aWzcuBETJ07ED3/4Q9TW1mLv3r3weu1LTAfRnYBF6azmkstp7Xi88nxZsG4rDgBOm/zMrlNk1KYM1+PzizEAONEmt6PvOyGXDE8qlNdvWK42hdemnHb65LFiTLd50ZhDXt/dNmXPTkeXGMt1y9upqGCyGJs8dbwYO3joXTH26d++EGNup1y+CgBKyeXbsZj8tdOdckmkyy2vU9Pmt4tpc02Apll/hqXHaWS4vVnwZVuXh8dgipeQOpweOF3W3/NgXwThgPXnMmpfGU6UcMNOPurq6lBXV2cZU0rhqaeewkMPPYTbbrsNAPD888+jtLQUL7/8MhYvXnxhoyUiIqK0N6J/3hw8eBBtbW2oqamJP+b3+1FVVYXm5uaRfCsiIiJKUyN6k7G2tjYAQGlp6aDHS0tL47EzhcNhhE+7A2N3d/dIDomIiIhSTNJP7DY2NsLv98encePGJXtIRERENIpGNPkoK+vvy9HePrivR3t7ezx2poaGBnR1dcWnw4cPj+SQiIiIKMWM6GmXiRMnoqysDNu3b8cVV1wBoP80yu7du7FixQrLeTweDzweuekZEVGmMQwTkah108qYMmAo6+olNxzQHNbzKdOAUkIjTOH1iJJl2MlHb28v9u/fH//54MGD2LNnDwoLCzF+/Hjcd999+PGPf4ypU6fGS20rKipw++23D+t9QqEYYNE6WovKXUgBuZ4sEJCvJYlE5QNAMV0uX+3tk0tiu21iY8fZr3YVk+edUCyXTU6ukEsx+0LyfG
OnzRFjbiWX057qiooxX36RGMNJuTvruLJyMdYZkLv2TpoxVYzlFcilxHkFM8XYqePydjjVJZcEA4DLpixYV3KyHTVtuijb/P4wbGopbRrlQimhq63wOI2M3r4wOnv6LGPBcAhRw/q7lZuXhzy/9QaNRsOAYf19VaZ9aThRog07+XjvvffwjW98I/7z6tWrAQBLly7Fhg0b8OCDDyIQCGD58uXo7OzEDTfcgK1btw7vHh9ERDQimEZSKhp28nHTTTfZ/lWkaRoee+wxPPbYYxc0MCIiIspMSa92ISKi0cOet5SKmHwQERFRQjH5ICLKQLzWg1LZiJbaEhHR2cUMExGhQskwTRiGdWmTYRiIxawrYYxYDEbUOmYachUVUTKkbPJhaAYMi86aypBLCu0uhPV5fWIsJ1cuxTx6XC7tPXjkuBhzuuSxuNuPijEACLXLrzu1RC6nnXeTXG564IsOMZY7dowYKy6yvjkcABw73i7G8vNtSk1NeRnculyGe+y43GXW6e0UY8c7W8XYF61y91mXS/5c5OfZ3zchGJS3v3LKBxw1m7pY06YMV9dsOtfadF82+OdxUpzs7IFHs/4M6Q4FaZOZykRMuN1ArC+IWI/15znQZdPWmigJeNqFiChTMbmkFMXkg4goGRKRGLDUhVIUkw8iomTQ0J+AnGsSwqMYlEFS9poPIqKMN5wjEzyKQRmERz6IiIgooXjkg4gowZRSMIVOgZoOSIV7ylQwYlJXWyWemuFBE0o1KZt8+P3Z8HndQx6POeVS295euZxMCe2rAaCrR+5Q+vkhuZy0t1cu0/R55YNKrQflDrsAUGqx3APGjp0gxvIrJooxV49NaahXLn2tnHONPFubXPrqi8nlwgbk7RQIyLHyLLkkOCLcFwEAtOwcMVaZXSHGcvPlMuOek21iDACOtZ8UY1FNXt+hiE33UV0+6Z/tkRs3RoI25cRu67EY/HU1qsKhMPqsm9rC5dTgdAqdawMB9AhJi8+hI89lvUt3WHQIJ0omnnYhIkoHw73glBeoUgpj8kFElA6Ge/BioJqGKAUx+SAiylQ820IpiskHERERJRSTDyKiTKPO+D9RimHyQUQXZOfOnbj11ltRUVEBTdPw8ssvD4orpbBmzRqUl5fD5/OhpqYGn3322aDndHR0YMmSJcjLy0N+fj6WLVtmW02W9mxOh2haf1ia5Jc8bUZ98P81m8aDRMmQsqW2vV0diIWGlgE6Iz3iPC6LLrhxcrNUOB1ysK9XLsMtyJU7t+Zny6WPwVP2pbYlFUVibOzs/yPGPjoSEWN/2y/HrisvFGOdnfJ8pZPniDEdQh0hgEhYLsPNV3LJbPcxuXzVF7FuJQ4A5YU2y2d4xJhrdoEYC9p0ygWAP//h/4qxI4fl5XcIpa/95F8gNk10EbX5G0MXWrCHhHbvVgKBAObMmYN77rkHCxcuHBJ/4okn8PTTT2Pjxo2YOHEifvjDH6K2thZ79+6F19v/PVmyZAlaW1uxbds2RKNR3H333Vi+fDk2bdp0zuNIJzleDwqEbto+txMeoWQ2GgwhFrL+TubmeFBcaF1WfkJ1ATh1XmMlGg0pm3wQUXqoq6tDXV2dZUwphaeeegoPPfQQbrvtNgDA888/j9LSUrz88stYvHgxPvnkE2zduhXvvvsurrrqKgDAM888g1tuuQU/+9nPUFEh34uFiNITT7sQ0ag5ePAg2traUFNTE3/M7/ejqqoKzc3NAIDm5mbk5+fHEw8AqKmpga7r2L17t/ja4XAY3d3dgyYiSg9MPoho1LS19d8JtrS0dNDjpaWl8VhbWxtKSkoGxZ1OJwoLC+PPsdLY2Ai/3x+fxo0bN8KjJ6LRwuSDiNJSQ0MDurq64tPhw4eTPSQiOkdMPoho1JSV9ffHaW8f3COpvb09HisrK8OxY8cGxWOxGDo6OuLPseLxeJCXlzdoIqL0wOSDiEbNxIkTUVZWhu3bt8cf6+7uxu7du1FdXQ0AqK6uRmdnJ1paWuLPefPNN2GaJqqqqhI+5kRQ6L8Y12rCWWISzaYayi5GlAzDrnbZuXMnnnzySbS0tKC1tRVbtmzB7bffHo/fdddd2Lhx46B5amtrsXXr1mG9j64BDovvi2HToVPZfMF0yKWDhiaX2p6SKzjR3S3vDFRYLlEt98slugBw9Te+IcYqp18rxl5a/2sxVmbT2dURCYqxL/5+QH7NSZeKMW/RFDGWreRy6b6OY2LMZ8qlr5GgXNp7okeO5Y+ROwEXlV0ixoK99n9l6zZhwy137tV0+TMcjcqfKU1osw4AmpJjsZhQ0mmc+92pent7sX///vjPBw8exJ49e1BYWIjx48fjvvvuw49//GNMnTo1XmpbUVER32/MnDkTCxYswL333otnn30W0WgUK1euxOLFizO20qWrT0GopkVuMAafw7rkPMfjQEGe9f6jcuJ4zLziMvSnNoM/R5H/tx/Y/fkFjJhoZA07+ThbTT8ALFiwAOvXr4//7PHI91IgovT23nvv4RunJcyrV68GACxduhQbNmzAgw8+iEAggOXLl6OzsxM33HADtm7dGr/HBwC88MILWLlyJebNmwdd17Fo0SI8/fTTCV+W9MdbmlJ6GHbyYVfTP8Dj8dieqyWizHHTTTfZnhLQNA2PPfYYHnvsMfE5hYWFGXtDscQ6s5Xt0KMgRKlgVK752LFjB0pKSjB9+nSsWLECJ0/Kd6ZkrT4R0UjShH8TpY4RTz4WLFiA559/Htu3b8fjjz+OpqYm1NXVwTCszzuzVp+I6MLwZAulmxG/vfrixYvj/541axZmz56NyZMnY8eOHZg3b96Q5zc0NMTPEQP9V8IzASEiIspco15qO2nSJBQXFw+6Gv50rNUnIrowPLlC6WbUG8sdOXIEJ0+eRHl5+bDm01T/dCZD6MIJAJou51JOmzRLBW1eU26yisIi666UAFCWJZf2fu2qafKLAph5nVxOe+qYXGrsickdeCdVVoox02Yhy0rGiLFYSF7GPptuuJGYPF80KH8kDcjlwge+OCLGPvzoPTF23bXyOIvK5O7C3T1ySTAAuOSPBoovkUutTZvPsBGxKZm1Ke3uOt4pxsI91gMNR+X3oguXk+NBgd9nGSvwmMgWSm0rxhajXOhCXT55GiZfcbVl7POQDuAP5zVWotEw7OTDrqa/sLAQjz76KBYtWoSysjIcOHAADz74IKZMmYLa2toRHTgRUTrTNJubgl1AbOBvtoG6Fx4VoVQ07OTDrqZ/3bp1+OCDD7Bx40Z0dnaioqIC8+fPx49+9CPe64OIaKTZZBdMPCiVDTv5OFtN/+uvv35BAyIiutidU+Iw8KQznqwBUOrLh5h9UIpibxciohRzTjmDdsb/v6S+fEwx8aAUxuSDiCiDaOABD0p9TD6IiJLlzDPY53u3MIv5mIBQKhv1UtvzZcYMmI6huVEwLJeFum06tzqdLjHm0OUyxSllcidVr0/O3S6ZIN8obc4NctdaACifPluM7WleL8bGj5PHWnbZLDHmHjNZjDmz/GKsLySX/Qa75c617UcPi7FT7XLJrBGVu
9P6cr1irLhY3vaHj74vxkrLx4qxWJ+87ACggmExpgVOiTFDyR2GlVXt+Zd8HnkZ3WVyrNtj/SsqFOGvrtFU4NNQmmu9jkuKCpGXY12GO2n6NFwyzbpjdH7FeJRMse40XfBp+/kNlGiUpGzyQUSUyUar1FYInPO4iBKBp12IiDLFmQ1tiVIUkw8iokyhfZVzsNqFUhmTDyKiDCJU4BKlFCYfRERElFBMPoiIiCihUrbaxeVwwuUYOrxTPXK5pRGSDzT6sqxL1wDAoctXZpXYdK493NopxiZ/bYEYq5wlx/rJJbPRnoAY8+fKZbFjpl0hxgJO6y6ZAPDx+++KsXBQHkt3d6cYO/HFITHmMOSyZ69X/riOnSiXxc4WShMBIOaQO8y6HPlyzC13QgYAZygkxvo+/0KMmTGbzrU2fyr0OhxiLKtIXsbSCuvOvcEQu9qOpr6+MLq6rL8/Uy6di4lTZ1rGxk+ZioopUy1jLq8XQP+2HnrndfkzQJQMKZt8EBFlKvtCFA26bp1MarpDjmk6BlINXvdBqY6nXYiIMhlLbikFMfkgIspkPPxBKYjJBxFRJuGRDkoDTD6IiDIJj3RQGmDyQURERAmVstUukVAYujm03C/LIw9Z88rlhi49JsaUIcd8OfJr/vMd/yzGrqubJ8byikvFGAC0//0TMeawWY7Oni4xdvwf+8TY0R65rHLHyy+LsRyf3C01FJa7vpaVyiXBeblySeDBI3I33IjNeimsuESMTZs1V4zB8Iihjk65+y4A9NmUfZ8KymPVlPz5DgXljs69Sj7Wrnrlst+Z+cJ7yRXPNAK+VvU1fO3SCZaxmV+7DmMnTrOM+XIL4MuxLsUP9XSh48hBy1jvyWPnN1CiUZKyyQcRUaZyuZ3weD04/U4c8ZjHDZfHbTmfw+mErssHrJVpnaBKjxMlC0+7EBElDS/QoIsTkw8ioqRRGInyFBa4ULrhaRcioqQZmSMfPH5C6YZHPoiIMhSPiFCqYvJBRJSBTm8uR5RqhnXapbGxES+99BI+/fRT+Hw+XHfddXj88ccxffr0+HNCoRAeeOABbN68GeFwGLW1tfjVr36F0lL78tIzmSoCU1lcoW1RfjtAi8lXdMeU3IVU0+Svp9eTJ8aumCuXaXpcchnq3j3vizEAOHX0gBgLh+WyyZ5THWLs8P69YqxXyR1/XYb8fjlOuQw5zyuXzI4pkEttW9vbxFgsKm/Dvh65tPfwQbmLLvCxGOnt7RFjXqf9Lj3mKRFjJ2PyZ8rn84qxrFx5O/mccllwT1+3GIuZ1mW/MZvvGV24yvETMOOyyyxjRcXFcDmtd81dJ0/iiNAVOdTbhb7O45ax4+2t5zdQolEyrCMfTU1NqK+vx65du7Bt2zZEo1HMnz8fgcBXraHvv/9+vPrqq3jxxRfR1NSEo0ePYuHChSM+cCKidOVyueD1ei0np8MBXdMsJ8OIIRIOW0+RMKLRiOVkxOR7yxAlw7COfGzdunXQzxs2bEBJSQlaWlpw4403oqurC8899xw2bdqEm2++GQCwfv16zJw5E7t27cK11147ciMnIiKitHRB13x0dfXfUbOwsBAA0NLSgmg0ipqamvhzZsyYgfHjx6O5udnyNcLhMLq7uwdNRERElLnOO/kwTRP33Xcfrr/+elx++eUAgLa2NrjdbuTn5w96bmlpKdrarM/lNzY2wu/3x6dx48ad75CIiIgoDZx38lFfX4+PPvoImzdvvqABNDQ0oKurKz4dPiz37yAiIqL0d143GVu5ciVee+017Ny5E5WVlfHHy8rKEIlE0NnZOejoR3t7O8rKyixfy+PxwOORr9QnIroYDe36QpQ5hpV8KKWwatUqbNmyBTt27MDEiRMHxefOnQuXy4Xt27dj0aJFAIB9+/bh0KFDqK6uHubQzC+nMx6Nye02na4sMWbE5NLBCOQrwUv91h0kAeD1//uaGCsslUs4S8rtTy1F+uTutC6XnKjlZMslnE5dLovNtikLLispEmPBnlNizOeQx3ny+AkxFo3I2ynXK5eaRnrlUtvP3n9PjLV++jcxFo4FxRhc8voEAMNufVfKZcjIlj/fukcue/YKJbMAUAB5vc28bKLl433BKID/J853up07d+LJJ59ES0sLWltbsWXLFtx+++3x+F133YWNGzcOmqe2tnbQBewdHR1YtWoVXn31Vei6jkWLFuGXv/wlcnJyzmkM6Wbf3r3INgOWsZz8EniyrL/LLo8PLrf19jQiIURD1q8ZCfad30CJRsmwko/6+nps2rQJr7zyCnJzc+PXcfj9fvh8Pvj9fixbtgyrV69GYWEh8vLysGrVKlRXV7PShShDBQIBzJkzB/fcc49YVr9gwQKsX78+/vOZRzuXLFmC1tbWeAn/3XffjeXLl2PTpk2jOvZk6esNoKuz0zIWNRzwBq3vaZOdk4fsXOvjIUY0DNOwTkJN3reFUsywko9169YBAG666aZBj69fvx533XUXAOAXv/hF/C+X028yRkSZqa6uDnV1dbbP8Xg84qnXTz75BFu3bsW7776Lq666CgDwzDPP4JZbbsHPfvYzVFRUjPiYiSi5hn3a5Wy8Xi/Wrl2LtWvXnvegiCiz7NixAyUlJSgoKMDNN9+MH//4xygq6j+l19zcjPz8/HjiAQA1NTXQdR27d+/Gt771LcvXDIfDCIfD8Z9Zpk+UPtjbhYhG1YIFC/D8889j+/btePzxx9HU1IS6ujoYRv+pgLa2NpSUDL4dvdPpRGFhoViiD7BMnyidnVe1CxHRuVq8eHH837NmzcLs2bMxefJk7NixA/PmzTvv121oaMDq1avjP3d3dzMBIUoTPPJBRAk1adIkFBcXY//+/QD6S/SPHTs26DmxWAwdHR3idSJA/3UkeXl5g6bMo4R/E6W3lD3yYZoaTHPoVd1um06qXqfc1Ra6XDGvHHLpoxmRO6meOCEfEu49Lsd8Uftz0ybkZSwskEtf8yvGiLGYERZjXxyVx6psdni6Ln98IjaNrByaXNqb7ZXLpW2aFsNhF7TpWmxE5LJm3eLzN6C7Ty4zBoCIRy7Tza2Qt0XA1ynGeky5DDcUkP+OKMqbJMaKhVLqQEB+rwt15MgRnDx5EuXl5QCA6upqdHZ2oqWlBXO/7BT95ptvwjRNVFVVjdo4kqmvpxPdJ623WainWyypjxUUQSsstowpU1l3AgcQ65NL0YmSIWWTDyJKD729vfGjGABw8OBB7NmzB4WFhSgsLMSjjz6KRYsWoaysDAcOHMCDDz6IKVOmoLa2FgAwc+ZMLFiwAPfeey+effZZRKNRrFy5EosXL87YSpdYJIxIyDpBNaMxxBxOWN1mLMvthOGzTkwUNCjhYLYZk/+IIkoGnnYhogvy3nvv4corr8SVV14JAFi9ejWuvPJKrFmzBg6HAx988AH++Z//GdOmTcOyZcswd+5c/O///u+ge3288MILmDFjBubNm4dbbrkFN9xwA/7zP/8zWYuUAs5MPHjKhTILj3wQ0QW56aabbMvwX3/99bO+RmFhYcbeUOz8nHnKjzdap8zCIx9ERGmF
R0Eo/TH5ICJKKzwKQumPyQcRERElVMpe86FrHuja0OF5PXKHTmXTnTbbJ5dwZudal64BQF9U7iRalOsWY06bsUS62sUYAJi6/Lp9LrmktLTUukMpAJgRuXRy+uxKMfbOW9vFWETJnTJdmvzXWbBXni8vV75Xg9spf1wdmrxeekPyNjzYKpfMdnbK2zCsWXcPHTBmmpzXj8236c6r5G1/6oS83twhm/LlsTadifusG44Fg2xENpq8Lh3ZHuuSeocOODTr9e8wQ4j1WZfqK02H0oRqF5v9GFEypGzyQUSUqVxOHV63dfKhQ0EXruvQzSjM6GkluqcVxSjoUMK9d1hqS6mGp12IiNIVL/+gNMXkg4goE7EohlIYkw8ioqQZxQxh4KgIkxBKQUw+iIiSJgHnTXhqhlIQkw8ioqTioQm6+KRstYvLqcHtHJob9YXljqAOr013Wod1MyYA6IvKHUgdLnnH4HHLJZMulzwWd5ZfjAGAP0+et+24XKbbN1YumS0ZN0WMfXHshBi77OrrxVjv8aNi7O9/+1iMBXo7xZjTIW8Lv18uw9Ugl9q2fiGP89DnNl1tPfJ2yCuVS7cBYEyhzVhtSn+1Dvk9C07JX9exJYVirDJf/lzs32vd0TgYYnXEaHI4HHAJpeO65oAulMw63T44hI63usMJp8u65Nrrlfd/RMmQsskHEVGmcjoccDmtEwWHww1dd1k1tf0y+fBazud2OeETOt56PfL9Y4iSgaddiIhS0Yhdq8HTOpR6mHwQEWU0XnFKqYfJBxERESUUkw8iIiJKKCYfREQZhdd4UOobVvLR2NiIq6++Grm5uSgpKcHtt9+Offv2DXrOTTfdBE3TBk3f+c53RnTQRETpTJmAKUxK6ejfNQ+dTMNELBqxnIxYDEbMgBEzv/z/V5NpyqXoRMkwrFLbpqYm1NfX4+qrr0YsFsO///u/Y/78+di7dy+ys7+6P8G9996Lxx57LP5zVpb9PRGslBTpyPIOzY2iJ0+K8wQN+QsWsOmArnS5fbjTpo17Xp7cqtwt1NsDQDBg3RJ7gM9ls1kicuy9d94RY5Omy/cHOXLE+l4PAKDr8sVqWR55GR0291Xx+eR7WQR65ft8BINyLBaLiLEcofwQAK67cpoY8+bK9+qIOWJiDACMaJ8YCx6W7/Oh91iXUQJASVauGLty2mXyfPmlYqyl9aDl46GI/fLRhTGVBtMUvltKhwbrjremYSCqrPdXOhTcwv7KjMn7OKJkGFbysXXr1kE/b9iwASUlJWhpacGNN94YfzwrKwtlZWUjM0IiIiLKKBd0zUdXV//dIQsLB99d8YUXXkBxcTEuv/xyNDQ0oK9P/iswHA6ju7t70EREROeIl3hQGjrvO5yapon77rsP119/PS6//PL44//yL/+CCRMmoKKiAh988AG+//3vY9++fXjppZcsX6exsRGPPvro+Q6DiOjixtt4UBo67+Sjvr4eH330Ed5+++1Bjy9fvjz+71mzZqG8vBzz5s3DgQMHMHny5CGv09DQgNWrV8d/7u7uxrhx4853WERERJTiziv5WLlyJV577TXs3LkTlZVy0yoAqKqqAgDs37/fMvnweDzweNj0iIgozqKvC1EmGVbyoZTCqlWrsGXLFuzYsQMTJ0486zx79uwBAJSXl5/XAImIMo1pGojFrDsH67oOBaGiRQM0KSlRJpRpfQFIKCR3AydKhmElH/X19di0aRNeeeUV5Obmoq2tv0TT7/fD5/PhwIED2LRpE2655RYUFRXhgw8+wP33348bb7wRs2fPHtbAKivdyPENLeX0a3Ip4v7D8oWt7cflq7IihnzkJSdHXkWBPrkdu2H2ijHHWa7z7TgulxP39MolkKGoPB6HkmO5OQVirL2tQ4wdCcglo6aS/2wrHSOXKGum3Mr9VOcpMebJlrdhvl8uUXU75G0RjtiUJwodSQcEwvLrRnrlebNNeb4p4+QKsooyeZ0ePiKXWZ88bv2dCUdZmjmaItEQwhHrdW+YQTid1p8DDdqXe4+h36+AocOMfVmie8aRk1On5O8xUTIMK/lYt24dgP4biZ1u/fr1uOuuu+B2u/HGG2/gqaeeQiAQwLhx47Bo0SI89NBDIzZgIqKLm1Vir87+FKIUMuzTLnbGjRuHpqamCxoQERENF7MNSi/s7UJEREQJxeSDiIiIEorJBxERESXUed9kjIiIzpOSr6FTSoMplMxqkOczDUDqrWkqdrWl1JKyyUdevgs5WUNLEoNCaSAAFJRYd4IEAGTLnXVPtMs18KGI3C3V6Za7ntrMBvMsZYxRQx5PV1AuN8226d4a6pPLYoOhE2IsYjNWwyamlLwtervlbZiX57OJ+cVYMCi/5omT8jrLyZE77Gq6fGBQi9lffO12ysvhkavF4XbL6+2SKZeIsWCfPJ6dO/eKsQ/+dszy8ZhNh2i6cKZpwjCk74+CUtafPdMAIGwaw4zBFBKTqE3XZ6Jk4GkXIiIiSigmH0RERJRQTD6IiIgooZh8EBERUUIx+SAiIqKEStlqFyKijGVzN3RN06Fp1lVPugZIPRs13QSkChqxFS5RcqRs8uHwOuH0Dh2eN88tzlOYIx/IcQbl8lWXTy4r7D5ls4oM+f183hJ5Npd9GaMR7hRj7ix5PC6nvG4cDrnUOGxzD4BIVC7RUzadazWbSlQVkct+DTkEl10nWbdcZtx5Si61DUbkLrr+fLmU2mlThgsAus226IPcmbj9RI8YO2XT0bgnIHctfmPHp/L7CRXK0n0mrDQ2NuKll17Cp59+Cp/Ph+uuuw6PP/44pk+fHn9OKBTCAw88gM2bNyMcDqO2tha/+tWvUFpaGn/OoUOHsGLFCrz11lvIycnB0qVL0djYCKczZXdT5013aHC6rT9DbmcOXE7r76tpmpBabEWNMFQsaBnTHDa3ISBKAp52IaIL0tTUhPr6euzatQvbtm1DNBrF/PnzEQgE4s+5//778eqrr+LFF19EU1MTjh49ioULF8bjhmHgm9/8JiKRCN555x1s3LgRGzZswJo1a5KxSBnk3JNIokTKvD8piCihtm7dOujnDRs2oKSkBC0tLbjxxhvR1dWF5557Dps2bcLNN98MAFi/fj1mzpyJXbt24dprr8Wf/vQn7N27F2+88QZKS0txxRVX4Ec/+hG+//3v45FHHoHbLR9JIjs83UKpiUc+iGhEdXX1nwIqLCwEALS0tCAajaKmpib+nBkzZmD8+PFobm4GADQ3N2PWrFmDTsPU1taiu7sbH3/8cQJHT0SJwCMfRDRiTNPEfffdh+uvvx6XX345AKCtrQ1utxv5+fmDnltaWoq2trb4c05PPAbiAzEr4XAY4fBX13J1d3eP1GKkNAUez6D0xyMfRDRi6uvr8dFHH2Hz5s2j/l6NjY3w+/3xady4caP+nqmAiQdlAiYfRDQiVq5ciddeew1vvfUWKisr44+XlZUhEomgs7Nz0PPb29tRVlYWf057e/uQ+EDMSkNDA7q6uuLT4cOHR3BpRpeuK2ia9QTNhFKGMJkwlWE5KWVCmcLE604pxaTsaZd
ArxOaaVFa6cgR58nJlus0XT7525dt02bU75fLUHu7rcva+mPtcqzvLF1tQ3I8110kxrwuuRQ1FpZLjZ1OOQcVqgEBAC6PXL6nafKMWTnyx063+UTGDLnU1O2TZ8zLl8uMOzrk0tYemxLkvEJ5OwBAn00X0c/+cVKMffqh/Au0tFAu/S2tlJcRurwcxf5cy8cN08Tnp2zqnk+jlMKqVauwZcsW7NixAxMnThwUnzt3LlwuF7Zv345FixYBAPbt24dDhw6huroaAFBdXY2f/OQnOHbsGEpK+svUt23bhry8PFx66aWW7+vxeODxyCXWqSwrN4r8Yuv1GwqaiEYDlrFw1EQ0ar0vM2ImTKHbcjQif/+JkiFlkw8iSg/19fXYtGkTXnnlFeTm5sav0fD7/fD5fPD7/Vi2bBlWr16NwsJC5OXlYdWqVaiursa1114LAJg/fz4uvfRSfPvb38YTTzyBtrY2PPTQQ6ivr0/bBGPk8fAFZQ4mH0R0QdatWwcAuOmmmwY9vn79etx1110AgF/84hfQdR2LFi0adJOxAQ6HA6+99hpWrFiB6upqZGdnY+nSpXjssccStRhpQMPQBISXn1J6YvJBRBdEncMFBV6vF2vXrsXatWvF50yYMAF/+MMfRnJoFwEmHpSeeMEpERERJRSTDyKiZLA6gzJqL06UWoaVfKxbtw6zZ89GXl4e8vLyUF1djT/+8Y/xeCgUQn19PYqKipCTk4NFixYNKZ8jIrrYaZqCdma5rU357enP6U8shk79jWsHfsYZcaLUMqxrPiorK/HTn/4UU6dOhVIKGzduxG233Yb3338fl112Ge6//378/ve/x4svvgi/34+VK1di4cKF+POf/zzsgR09DGRZVMCGO+Wy2Nwxcimm12fTvVSu3kVhobyKegNCS1AAnZ1y7NRJ+z4Vp+RKTDhMubzVtDn3bkittgHAlGN22ammy+ebHTadSIM23YCVvAnhMuVtGOvrEGNGUN4Whk2n3M5eeb6IfbU0OmzKsP+xX97AnSetSywBIBKQ37TMb30vDACYOWGsGJOGGTVM/PUf8jqlC1M8JoJxE6xXfl9fEBGhUjvY50AoaL0PCIWAoPDxcXvl0m+iZBhW8nHrrbcO+vknP/kJ1q1bh127dqGysvKszaOIiIiIzvuaD8MwsHnzZgQCAVRXV59T8ygiIiKiYZfafvjhh6iurkYoFEJOTg62bNmCSy+9FHv27Dlr8ygrF2tzKCIioovVsI98TJ8+HXv27MHu3buxYsUKLF26FHv37j3vAVyszaGIiIguVsNOPtxuN6ZMmYK5c+eisbERc+bMwS9/+ctzah5lJZ2bQxEREdHwXfB9PkzTRDgcHtQ8asCZzaOseDyeeOnuwERERMNjf69T3gmVUsuwrvloaGhAXV0dxo8fj56eHmzatAk7duzA66+/fk7No4bDcBXBcA1tKBV1XyXOEzblzo167IQY8/rlL2b+GLm0t0CX60IL++ROop0dPjEGAJ0n5HLaYEDeZEbMpoRXyXmmGZPHGgrKnU3dbvn9HE55GXpC8vsFe206Eyu5XDBXt+7OCgCmLl9HFI3K69OTLZcuey0+m6fLd8tjnYR8MTZrTrYYmz57jhi7ZMoUMXbNtXLJ8JGjvZaPhyMx4K//EOejC1M8thLjppVYxsJhEzGhqjrY50Q4ZF0eHg5qCAasv+fHzS8AHD2foRKNimElH8eOHcO//uu/orW1FX6/H7Nnz8brr7+Of/qnfwJw9uZRRERERMNKPp577jnb+Lk0jyIioq+wLy1djNjbhYgoiZh40MWIyQcRERElFJMPIiIiSqhh3+F0tKkvm6P1hayrBYLC4wCgueTGY6YpV6boffKBT2dAfk3ocqOvQFCu6AgEbV4TQJ9dNUhIrsCwWUTY5Zm21S5heayGsmksZ9PILhiWlyEUkd9PKTnmtKk8CkXkWNhunWnyOB1KruYBgHBUfuGIVMoAwGUzn/SdAIDegFwlFLTZhmFh3QyMX9k0K0w16TTWYMhAb+DLdYyvTr0oAJGwCekrGepTCAtFfeGghlDI+nsubWei0XAu30VNpdg39siRI7zLKVGKOHz4MCorK5M9jHPy97//HZMnT072MIgueuey30i55MM0TRw9ehS5ubnQNA3d3d0YN24cDh8+zBuQnYbrRcZ1Y20460UphZ6eHlRUVEDX0+PsbGdnJwoKCnDo0CH4/f5kD+eCZMpnmMuRekZzWYaz30i50y66rltmTLz7qTWuFxnXjbVzXS/p9gt8YGfn9/szZrtnymeYy5F6RmtZznW/kR5/0hAREVHGYPJBRERECZXyyYfH48HDDz8Mj8e+l8bFhutFxnVjLdPXSyYtX6YsC5cj9aTKsqTcBadERESU2VL+yAcRERFlFiYfRERElFBMPoiIiCihmHwQERFRQqV08rF27Vpccskl8Hq9qKqqwl/+8pdkDynhdu7ciVtvvRUVFRXQNA0vv/zyoLhSCmvWrEF5eTl8Ph9qamrw2WefJWewCdTY2Iirr74aubm5KCkpwe233459+/YNek4oFEJ9fT2KioqQk5ODRYsWob29PUkjTpx169Zh9uzZ8ZsIVVdX449//GM8nqnrJd32F4888gg0TRs0zZgxIx5P1e00Evukjo4OLFmyBHl5ecjPz8eyZcvQ29ubwKXod7Zlueuuu4ZsowULFgx6Tiosy0jtDw8dOoRvfvObyMrKQklJCb73ve8hFhudvkApm3z89re/xerVq/Hwww/jr3/9K+bMmYPa2locO3Ys2UNLqEAggDlz5mDt2rWW8SeeeAJPP/00nn32WezevRvZ2dmora1FKCQ3GssETU1NqK+vx65du7Bt2zZEo1HMnz8fgUAg/pz7778fr776Kl588UU0NTXh6NGjWLhwYRJHnRiVlZX46U9/ipaWFrz33nu4+eabcdttt+Hjjz8GkJnrJV33F5dddhlaW1vj09tvvx2Ppep2Gol90pIlS/Dxxx9j27ZteO2117Bz504sX748UYsQd7ZlAYAFCxYM2ka/+c1vBsVTYVlGYn9oGAa++c1vIhKJ4J133sHGjRuxYcMGrFmzZnQGrVLUNddco+rr6+M/G4ahKioqVGNjYxJHlVwA1JYtW+I/m6apysrK1JNPPhl/rLOzU3k8HvWb3/wmCSNMnmPHjikAqqmpSSnVvx5cLpd68cUX48/55JNPFADV3NycrGEmTUFBgfrv//7vjF0v6bi/ePjhh9WcOXMsY+mync5nn7R3714FQL377rvx5/zxj39UmqapL774ImFjP9OZy6KUUkuXLlW33XabOE+qLsv57A//8Ic/KF3XVVtbW/w569atU3l5eSocDo/4GFPyyEckEkFLSwtqamrij+m6jpqaGjQ3NydxZKnl4MGDaGtrG7Se/H4/qqqqLrr11NXVBQAoLCwEALS0tCAajQ5aNzNmzMD48eMvqnVjGAY2b96MQCCA6urqjFwv6by/+Oyzz1BRUYFJkyZhyZIlOHToEID0/fyeyz6pubkZ+fn5uOqqq+LPqampga7r2L17d8
LHfDY7duxASUkJpk+fjhUrVuDkyZPxWKouy/nsD5ubmzFr1iyUlpbGn1NbW4vu7u74UdORlJLJx4kTJ2AYxqCVAAClpaVoa2tL0qhSz8C6uNjXk2mauO+++3D99dfj8ssvB9C/btxuN/Lz8wc992JZNx9++CFycnLg8Xjwne98B1u2bMGll16akeslXfcXVVVV2LBhA7Zu3Yp169bh4MGD+PrXv46enp603U7nsk9qa2tDSUnJoLjT6URhYWHKLduCBQvw/PPPY/v27Xj88cfR1NSEuro6GIYBIDWX5Xz3h21tbZbbbSA20lKuqy3RcNXX1+Ojjz4adL78Yjd9+nTs2bMHXV1d+J//+R8sXboUTU1NyR4Wnaauri7+79mzZ6OqqgoTJkzA7373O/h8viSOjAYsXrw4/u9Zs2Zh9uzZmDx5Mnbs2IF58+YlcWSydNkfpuSRj+LiYjgcjiFX4ra3t6OsrCxJo0o9A+viYl5PK1euxGuvvYa33noLlZWV8cfLysoQiUTQ2dk56PkXy7pxu92YMmUK5s6di8bGRsyZMwe//OUvM3K9ZMr+Ij8/H9OmTcP+/fvTdjudyz6prKxsyIXAsVgMHR0dKb1sADBp0iQUFxdj//79AFJvWS5kf1hWVma53QZiIy0lkw+32425c+di+/bt8cdM08T27dtRXV2dxJGllokTJ6KsrGzQeuru7sbu3bszfj0ppbBy5Ups2bIFb775JiZOnDgoPnfuXLhcrkHrZt++fTh06FDGrxsrpmkiHA5n5HrJlP1Fb28vDhw4gPLy8rTdTueyT6qurkZnZydaWlriz3nzzTdhmiaqqqoSPubhOHLkCE6ePIny8nIAqbMsI7E/rK6uxocffjgomdq2bRvy8vJw6aWXjsqgU9LmzZuVx+NRGzZsUHv37lXLly9X+fn5g67EvRj09PSo999/X73//vsKgPr5z3+u3n//ffX5558rpZT66U9/qvLz89Urr7yiPvjgA3XbbbepiRMnqmAwmOSRj64VK1Yov9+vduzYoVpbW+NTX19f/Dnf+c531Pjx49Wbb76p3nvvPVVdXa2qq6uTOOrE+MEPfqCamprUwYMH1QcffKB+8IMfKE3T1J/+9CelVGaul3TcXzzwwANqx44d6uDBg+rPf/6zqqmpUcXFxerYsWNKqdTdTiOxT1qwYIG68sor1e7du9Xbb7+tpk6dqu68886UWpaenh713e9+VzU3N6uDBw+qN954Q33ta19TU6dOVaFQKKWWZST2h7FYTF1++eVq/vz5as+ePWrr1q1qzJgxqqGhYVTGnLLJh1JKPfPMM2r8+PHK7Xara665Ru3atSvZQ0q4t956SwEYMi1dulQp1V/a9sMf/lCVlpYqj8ej5s2bp/bt25fcQSeA1ToBoNavXx9/TjAYVP/2b/+mCgoKVFZWlvrWt76lWltbkzfoBLnnnnvUhAkTlNvtVmPGjFHz5s2LJx5KZe56Sbf9xR133KHKy8uV2+1WY8eOVXfccYfav39/PJ6q22kk9kknT55Ud955p8rJyVF5eXnq7rvvVj09PSm1LH19fWr+/PlqzJgxyuVyqQkTJqh77713SEKbCssyUvvDf/zjH6qurk75fD5VXFysHnjgARWNRkdlzNqXAyciIiJKiJS85oOIiIgyF5MPIiIiSigmH0RERJRQTD6IiIgooZh8EBERUUIx+SAiIqKEYvJBRERECcXkg4iIiBKKyQcRERElFJMPIiIiSigmH0RERJRQTD6IiIgoof4/N1Cc/V7M0REAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# We can see behind the scenes how PyTorchDeRandomizedSmoothing processes input by passing in the first few CIFAR\n", + "# images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", + "original_image = np.moveaxis(x_train, [1], [3])\n", + "\n", + "ablated = art_model.ablator.forward(torch.from_numpy(x_train[0:10]).to(device), column_pos=6)\n", + "ablated = ablated.cpu().detach().numpy()\n", + "\n", + "# Note the shape:\n", + "# - The ablator adds an extra channel to signify the ablated regions of the input.\n", + "# - The input is reshaped to be 224 x 224 to match the image shape that the ViT is expecting\n", + "print(f\"The shape of the ablated image is {ablated.shape}\")\n", + "\n", + "ablated_image = ablated[:, 0:3, :, :]\n", + "\n", + "# shift the axis to disply\n", + "ablated_image = np.moveaxis(ablated_image, [1], [3])\n", + "\n", + "# plot the figure: Note the axis scale!\n", + "f, axarr = plt.subplots(1,2)\n", + "axarr[0].imshow(original_image[0])\n", + "axarr[1].imshow(ablated_image[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7253ce1", + "metadata": {}, + "outputs": [], + "source": [ + "# We can now train the model. This can take some time depending on hardware.\n", + "from torchvision import transforms\n", + "\n", + "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[10, 20], gamma=0.1)\n", + "art_model.fit(x_train, y_train, \n", + " nb_epochs=30, \n", + " update_batchnorm=True, \n", + " scheduler=scheduler,\n", + " transform=transforms.Compose([transforms.RandomHorizontalFlip()]))\n", + "torch.save(art_model.model.state_dict(), 'trained.pt')" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "046b8168", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Normal Acc 0.902 Cert Acc 0.703: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [02:06<00:00, 1.61s/it]\n" + ] + } + ], + "source": [ + "# Perform certification\n", + "art_model.model.load_state_dict(torch.load('trained.pt'))\n", + "acc, cert_acc = art_model.eval_and_certify(x_test, y_test, size_to_certify=4)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "a2683f52", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: salman2021\n", + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). 
Loading weights using safetensors.\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n", + "INFO:root:Running algorithm: salman2021\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:timm.models._builder:Loading pretrained weights from Hugging Face hub (timm/vit_small_patch16_224.augreg_in21k_ft_in1k)\n", + "INFO:timm.models._hub:[timm/vit_small_patch16_224.augreg_in21k_ft_in1k] Safe alternative available for 'pytorch_model.bin' (as 'model.safetensors'). Loading weights using safetensors.\n", + "INFO:art.estimators.classification.pytorch:Inferred 9 hidden layers on PyTorch classifier.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The shape of the ablated image is (10, 4, 224, 224)\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAACbCAYAAADvEdaMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA71klEQVR4nO29e5Bc9XXvu/aj39OPefWMRho9kYSExMNCjwEfh8S6yJjEJpbrxLdcAae4pkxG3MJKOYlSjl2hUlEdn5wyZUeGuqkEkrqhcHFywY6MyeFKtmSMACODQW+EHjN6zGhGMz09/e7e+3f+6NFev9UI2xLz6On5fqpUWr336r1/e8/u1b/+rZehlFIEAAAAAABmPeZMDwAAAAAAAEwOmNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNgBAAAAADQImNiBhmLXrl20ePFiCgaDtHHjRnrjjTdmekgAADAlwN6BqzFlEzs8cGC6+f73v0/bt2+nb37zm/TLX/6SbrnlFtqyZQtdunRppocGGhzYOzDdwN6BD8NQSqnJPuj3v/99uv/+++nJJ5+kjRs30uOPP07PPfccHT9+nJLJ5K99r+u6dOHCBYpGo2QYxmQPDUwiSikaHx+nrq4uMs2ZX/zduHEjrV+/nv7hH/6BiKrPUnd3Nz3yyCP0l3/5l7/2vXjuZg/19tzB3s0N6u25g72bG1zXc6emgA0bNqje3l7vteM4qqurS+3cufM3vre/v18REf7Non/9/f1T8RhdE8ViUVmWpZ5//nmx/f7771ef+cxnPqBfKBTU2NiY9+/IkSMzfh/xb/Y9d0rB3s21f/Xw3MHezb1/1/Lc2TTJlEolOnjwIO3YscPbZpombd68mQ4cOPAB/WKxSMVi0XutJhYQ123YSLZt09jYqNAPmK4nN/uVJy9oDgu9thZ+3RqPeLLf9Ak9KxDSXlieOJoaE3rlCp8rEY/ztTlleT0lvpZCgeVgKCD0HHI8OZ/Pin2xeJRfKNYrleS5LOI/n6WNvSnSJPQiYb4Xti/I4yuWhJ4ytF8DJh+7VJJ6FWV47//r7/wbRaNRmmmGh4fJcRzq6OgQ2zs6OujYsWMf0N+5cyf9zd/8zXQND0wB9fDcTZa9+2//9AwFw2G6cOJtoT989rgnOw5/JpMLVgi9BUtWenKiY4EnB0PSxJ88+ron95065MmVjLRBlnauaCLmyXZA2tl1m+7w5KU38JgKaWm3jx55x5NdV9qTcqXgyceOHvHk8bHLQk+3rZWyZqtH8kIvk+PjVRw+V1tbs9BLNPP3gqsy/J6KUKNCvvo3Kpcr9PJ/7q+L5w72bu5xLc/dpE/sJuuBs22bbNsWExYiIsvkZWPb4smW3yf1Aj6+tKCfJ3N+S07s7ID22uL35P1SzzT5XEHtPaYj1MggnniSyzuDNcdztPBG15F/Bv34pFjPJCX0LGI9/T6FAvJcoaDfk30+lmtX4D9sYmfV6F2Z2PFxZt9S/o4dO2j79u3e63Q6Td3d3TM4InCt1MNzN1n2LhgOUygcoUAwKLb7/fx51Sd2tXoh7cdbWPthVzuxC4b4h2wgwD82zdofjfq5ND07KH+ghiM8OWrSvnhsVx4vHObzuq601aUy/x0DAb7eYo3NVJptNYiPYdvyXLatXbPBNtjnk/fCrx3f0dY4ah8rpyLtbj08d9cK7N3s51qeu0mf2F0rH/bAHTt2lAzTpNTwsNBv0eyZ0cov2hw5mzVCHNuSdUc8OePID6ky2JDkCvzrLpcvCr2yw0ZlWJvpBG15vEqF9Szz6saxei7+hVyp+QVrFFo92dRsYLkoxxSy+foz2urbiCN/cobDbHwNbcXSqJnkkua/zxXYWFbKNUbfrl5LsVzz03YGaWtrI8uyaHBwUGwfHBykzs7OD+gHAoEP/E0AmGo+zN6Np0apXCxSa6JF6Kt2njAqm1fO5i1cKvQcbSJlujlPdnPyM1oY5VUwleeVrfltMhZwYfcNntx9wyJP7pq/QOglkzw+n48/T5WEXNnrXsCfwUpF2rtCgVfcUqO8cjY8PCL0bL9u/NkwNrfKz3Ewwscb01YOA0H5decqvjc+m4+RHksJvVKxauMrsHdgljDpEaDX88DFYjHxD4Brxe/307p162jPnj3eNtd1ac+ePdTT0zODIwONDOwdmAlg78CvY9IndnjgwEyxfft2+sd//Ef6l3/5Fzp69Cg9/PDDlM1m6U/+5E9memigQYG9AzMF7B34MKbEFbt9+3Z64IEH6Pbbb6cNGzbQ448/jgcOTDl/9Ed/RENDQ/SNb3yDBgYG6NZbb6WXXnrpA/FPAEwmsHdgJoC9Ax/GlEzsJuOBC9oGmaZBVBMWsEiLq1vcwdmpyXYZmxLS48q0oMN8sSD0CmUtQ03T82tBxkREpAXQKpffE2+RsSSVsp7QwcdwapIsLD9fWLEkx1Su8DjCmp4dkWMKavsqBsfsmcoVehXi4+mJEE0ROfZMlmNzyhUtZqcmZnM8Xc0YLpVrLqoO2LZ
tG23btm2mhwHmEJPyBVsuE9llKhVlPGsux/Foi1fM9+RMVmaxlspsQ1ra2C7aPumUWb6cM1fv2HS7J8/vkLFz8Xg7D83mz3m4JnlCDzE2tHTSfDYj9IpanG44JO1Oc4Lj+5YtXe3JR48eF3pk8DGKRbZV8ZjMdtXyw2gszS5yRTK2z3V58KOjfD/zORnLfKXSa8Wpnxi7K8DegasxZckTeOAAAHMF2DsAQL0w8+WzAQAAAADApDDj5U4+jKDhkGm4FI3KIa6Yz8vurSFOefe50p2ZGeFld8fl+Wu+Jv3f1JbtYwmu/2T7pcshNTbO+7QhtUSlW2E8zUv6Ja2kSb4gXSxKc482abWgiIjKJU7XN7V6Ur6adHVHK45saz7WYo07x6/5JkyXr7+YkUVESSsFE9DKrFRc6dody1ZdFaWK3A4AuD4qhQJVDIOMigxvCPg5/GJMK/3U2ildpwtv4vIkye4uT9brVlZPxLZBFAa+KIsB504NsZ7JtvT4u78SeutXsev0ExvWe7Kq6VSZTnPB976zF8Q+v1Y03e/nLOG29vlCr6//PdYLst3N1BR4T6f5Ptk+touxmLTV+Ty7c3Uva6XGrnm19eQlAVC3YMUOAAAAAKBBwMQOAAAAAKBBqFtXbCJgkWWaFKpxP8a1zND2mNYSxpUuDP2VZWt+RVPOZYtaxXa9FY1dk1nqFNk9qiw+xqVLKamnZYqO53ipP+fIjKymkFaYtCjHbmmtc0yD1/+tgGwjlM+yKyXs0/o51rhBClpHjbxWPd2t8S2kMny8VI7vS6a2en25ev0VB65YACaDYj5HhnKpKSQ/47EWzk792C23enL30uVCb1zLSD1+qt+T05oNIiLKpFKefDnF7teLAzIsI6ZlxZLJWaK7v//vQs/3X9kW/k7Px3m7T4aDdHaye5iU7CaUGuUwl1++xT1lbZ+0/ZEo27iKFjZSyqSEnmaeqV2rluDU2ODLIzwOk7R+2rb8WkwkqlnG5ZoOPADUK1ixAwAAAABoEDCxAwAAAABoEDCxAwAAAABoEOo2xq4tHiTbMinqs8T2YJBfmxbHWYRqOkWUtbIBrlZaRCkZZ1HSOko4JY6hcFVNeRItPkPZXEJgvCRT7R2Hx5fTYtBq49HGs3z88yPyGD6TdWMZHnt5QMam5Mc4fmZhm1buIClLIRhRLjVQHOW4mkxGnndsnGPshsc4pvBM/5jQc6zqY+Mq5P8DMBkEAjYFAj4qW1GxPR/iEkyn0/yZfPuVN4TeyGXu9HD+Andb8FmybYxuW4oVtml6HC4R0bx2/mq4NHDWk2MBWT5lPJX25BOnT/P757XJ8/r4ePO6O8W+Lu113wDHBx5/t1/oJedx3N+ZPs0WlqVtdUv82tG6ZgRrSlgFbI7RzhdYLxaLCT3brr5PuVgHAbMDPKkAAAAAAA0CJnYAAAAAAA1C3bpiO9vC5LctivllqY2mMLsCDOEulW5BQytXUtQqjJskXROtUW6YHYlwqYH0mHR7xrXl+XGti8TZ81IvU2RXrF/zEMwPy1tt+zRX5+WU2FdUWkcNrdxJPCbdNHes5ibe6YvsSlA5eS/ibexyKOZ4HJmMnNcHfKzX3cnnSiZlM/PBdNVlW3Fc6jt0jgAAH41QKEmhUJgupaS9O9nP7sgjhw95sumT9sTRus3kxznEwjKlmzJfZNdpapzl8WxG6J05d9STIyG2BSuXrZQD19y5P//ZTz150ZIlQm3FyhWe3NoaF/sCQb6WeIzdpWZFhoBki3oHIS7Bkk+NCz3H4ZCSYIhtWiYt9WJa+ZSAFuJTKskwnNxEyZhyWf5tAKhXsGIHAAAAANAgYGIHAAAAANAg1K0rtrkpRAGfRXYpJbYHNBdEOMDVwot5uXxe1prdJxLNnlzbnLrk8Ny2XNY6OTQ1Cb0LQ7z0//5ZdhEMjcvleb1Jw6IQL+/f919uFXoL5vHx/+fBU2LfgZMDnlxx2dVhm3Ls4ylu1J3L8PiiUZ/QI4fdz8Eg7/MHZcZx2OB9Fa0r9kKtqTgRUXSk6tIolR3aD1csAB+ZRHMrhcIROtl/Qmy/eIYzTcM+/oyPZWWniEz6kicbLrtfU+PSxZrKs42zA/x5b+tICr2QFqIyf/EtntxdYzNO/+qAJ1sG26qyI7vpDA1zNv7atavEvhuWL+Xja5mvTZtuE3rvHOvz5GKBw2aKvpqsWGIXq6vYjg0MXBB6fq2rUbxZv35ZLSCfr4bNwBULZgtYsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBDqNsauvbmFgn6b8iMFsd00tHIdOS3FvyTjH2xD6wBR5niP2plsvsxxIYlmjs0oOTKe7dQ5js8YSWulRWxZid2y+AyxIOslbZlqHxzheJnlMVmJ/WILH2MwxbEzxZysDv/WCY7HMSscZ1KOyMrpFNfKlZhaaYF4WKhFXb7mgpbyr0ppobe4PTKhg5gTACaD06cPUiAYpGPvnxTbL1x835MdrYxJNB4ReiuXL/bkNavWePLFobzQOzvEx2jvZLuwaJksTxJt5ZizwVF+jxo+LfT6znLc21CK4+hWrRZq9H+s4Li6bEaOydXC8VSJbdzh1w4IveUrb/XkjvkJT37tjf1Cb2CQ7ZUeF1fIS/s5Oso2OdTEx3OVjNnL5qrXX6nIuEEA6hWs2AEAAAAANAiY2AEAAAAANAh164pNtLZRKOCj5qaQ2G6anKKfSnPKf7mmcrqppdu7xEvrqqZie1MTp82XieWjp2TZgWyR3RHBIKfJB/3yeKEIuzebLXYDHDw5KPQqJX5fMS5dse3NPA5DS90vV6RbOldil0ZW6zZRqkgXqaG5m/XGGz5TduFQptbxwubxVYpFqTfhplY17moAwPXxi5//hGyfTXaH7OywbNVaTw5pze1XrV4u9FauWODJToE/x8qUbs8scacc28d2xrISQq9cYRuXHR/x5HhN+EVFswF9l9geB5vOC714jEtOLV22WOxT2vpCPsVdgo69/rbUy/P1r9nyKU9ee/NSoZd/k12x758848nhsCxhFU+0aq/4+yKdlqVkisXqmOCKBbMFrNgBAAAAADQImNgBAAAAADQIdeuKJdMmMn1k+HwfqhLQuiiESWaJ2dqc1TS17hIkM54CIa6wPjzAWVK5Ybkcv7RFq3SueUSDEZlZunLZfD6vplix5HXoy/22JZtdR/18La3Nyzx52fKFQu903y88+dgJdn347RrXqWI3daXCf3KzJqPX5+cxulr1epeky9YwTPE/AOCjMXT+MlmWRbfdcq/YHghwJ4YWrenDvC6Z+T6SYtvVf5JdpyU3IPRMg92Jls2fcUdJm0GanXCK7M5VjrSfTfE2T76c4XAV0y/tsSs6/tSEcGiHbArydS3u6hZqQYvfZxLbtLVrZEZvIpHw5B/m/5cnD1yUNn1+kjvqOAbbal9NuE46XXXtVjNsZYgOAPUIvpkBAAAAABoETOwAAAAAABoETOwAAAAAABqEuo2xKxQqRMogo5yv2cPp9tksp7WXynKOWjE5Ji6T4/iTdE52gJjfzbdAVXjfojYZV7asi+
PPcgXeN3/FLULPrzhWY3SMuzeERGo9EV3mgJnuznliVyrLsSpLb+SyBrFmGc8Xa+Zq7qNDPPbRMRmz59PiXUzFMTdlV6bva2F15GgV22uqopCaiJdRCuVOAJgMQpFmsm2bfDUfqZTWeSbQkvDkXEXGuhW0uN9Qc5Tf49Z8eAt61xxtczkn1IIhLRbX4HJJrllTLqqV49T8imP7rFCz0FN+tneuIc9lOJp9svj4voiMAQ418etKke3d5fOylFRrhOMSP/vpLZ785q/OCL2M1omiUBzy5GJefuckogkiIipp3XgAqGewYgcAAAAA0CBgYgcAAAAA0CDUrSvWMRxyDJOUIyud6+6/UJC7UjRFpZvygtb8+vQ5Xma3a3wd/sELnlwYZL3lSVme5JN3sUv0/fPscojObxd6ba3cReLSELsIEgmZ/m+6fHy/1vGh+j4uXWIHU548lLoo9M5f5JR/n4+vPxGTbpp8nq9Z2TyXN2p8rK7mmjUNQ9OT8380nABgcunsXkQ+n/8Dn7VCgcNNBtNsrv2JNqFXrrCbUi8Rlc/Ijjxlxce3bQ7LqFiyLEo4xmVHkq0pT1Yj0k1Z0kI2DJePHQrVdAzSTJyrpE13tC5Bpk/rmmHJe5HJsvvV0OJGAjX3LK3Z3VC4xZM/0XOz0Dv+/llPPnRkgM+Tzgo9/0SHjnJZjhuAegUrdgAAAAAADQImdgAAAAAADULdumLj8QiFgn6q2HL5O5Ph9C9V5iX8sXGZCXq2b1B7D7sjQkE5l714ml0dHUF2Z8yfv0joJbq4urlvXHN1BqXLdsEtG3jXALtUQ5UhoecQX0c2WxD75oXZvVvSKr0bEdnEekGEM9KiCXYBj18eEHqXBi97ctng8RZKNdXmTfaxRgKcVVzKS3fOlQ4VTk1HCgDA9aEMi5RhfcDdlxtn92NAc2+Op0eEXqnAn+Vcmt/jq/mIRiPscm1vZjdlrEWGirQn+FyOzd158gE5vpFFbIOKjhYqUpNl61S0zNqaTF3H1Gyc5opNtMjMWtfhY+pZ+/G4dPv6DbZjqfGUJ6uytGO3rmKbmYjyfdm9+38JvaHBYSIiqlRkFQEA6hWs2AEAAAAANAiY2IEZZ//+/fQHf/AH1NXVRYZh0AsvvCD2K6XoG9/4Bs2bN49CoRBt3ryZ3nvvPaEzMjJCX/ziFykWi1EikaAHH3xQrNQCAEA9AHsHphpM7MCMk81m6ZZbbqFdu3Zddf+3vvUt+s53vkNPPvkkvf766xSJRGjLli1U0KqyfvGLX6TDhw/Tyy+/TLt376b9+/fTQw89NF2XAAAAvxWwd2CqqdsYu8zYCFUKPrJLslOEz9DmoloKvW3JkiG5DMfcNUc5fiQRCQq9/CjH2CW7uDvE/Jt/R+gdOscxIidOsnzHvBahl0rxvo5l3JXCJBlzUtIqnSeULE+SvsQxcSGt2vm8lppzORwX4ruZ41HyNWVRfv7iDz35XD+f1/LL+EDSYua0CilUrpn/m+XqmAqTlP5/zz330D333HPVfUopevzxx+nrX/86ffaznyUion/913+ljo4OeuGFF+gLX/gCHT16lF566SX6xS9+QbfffjsREX33u9+lT3/60/T3f//31NXV9YHjFotFKhY5LimdTn9AB4Bpo1IiMohstyQ2xzVz1R3nz+eNSxNCr0kr/WRpNjKbTgm9Qo7tYijCtmXlcmlbuhct8GTTx/HGmZQ8Xvc87pqz8jR3yYi1SDvb0szlU2xbdpRwNVujNDMejMgSVpWC1g1He4+vtkQM8ee6tY3jkjM5aYOzKY5Fnt/Occ33/cHdQu+FH/3/RDR55U5g78BUgxU7UNecPn2aBgYGaPPmzd62eDxOGzdupAMHDhAR0YEDByiRSHhGjoho8+bNZJomvf7661c97s6dOykej3v/uru7p/ZCAADgNwB7ByYDTOxAXTMwUP1V3dHRIbZ3dHR4+wYGBiiZTIr9tm1TS0uLp1PLjh07aGxszPvX398/BaMHAIDfHtg7MBnUrSvWNIgsg8ipKbWhNHehSbw07hjSFTuq9WtOp7XOC0Xp6pgXZzft+t/9XU9esHKT0Pv/nvpnT+7Uyo5YJVmJ/fyp91lv6WpPDrbeIPQiil3MuZFLYl/IZbdqKc/ug+Fx6UpItHMJltbOxZ6cz8SEnqm9dPwcp1HbeaJc5ntjaKn9hpJp/pVK9bEpz+IWFIFAgAKBwG9WBGAauHPDrRQKhmjp6lvE9gvnuWTS/C52l65YvkzodbbzF72l+HM9rpX7ICIqamVI9M9/U0SWO2lqYleq5Wc3r6/GVZzPcmjHx9awy3bxisVCr+yyQVY16wkVl+24snhMlk9+PZULbG9czS1q2vJ4RlCza9q+Yrks9GyLQ1GcUsqT29tkWamP/5f1RESULxTp+R/+hGYjsHdzC6zYgbqms7Naa2pwcFBsHxwc9PZ1dnbSpUtyclypVGhkZMTTAQCAegf2DkwGmNiBumbJkiXU2dlJe/bs8bal02l6/fXXqaenh4iIenp6KJVK0cGDBz2dvXv3kuu6tHHjxmkfMwAAXA+wd2AyqFtXrKGq/5ya5XO9Sba+Aq/yNXpaomlLK2dXdYZlZtPHbl/hyavuYPfr6CXpAg5UOJts6QLOGHMNmdHameTsKj2LK5eSLoxShfeV8/LP4BC7At4/f86T3z30ptC7YxMfs7WTM3rT4/LXnE9LLmtbzC4XtyabzCmxy7WiuazHhlJCrzhePWCxPDmV2DOZDJ08edJ7ffr0aXr77beppaWFFi5cSI8++ij97d/+LS1fvpyWLFlCf/3Xf01dXV103333ERHRqlWr6FOf+hR9+ctfpieffJLK5TJt27aNvvCFL1w1QwyAeuO2m1ZQJBKhm26Trtj8Gna5RuIcUyGtDpEytBAVzcXYEpErOEr7yOuffteVR6zoGaCaDS4WZejJshsWenLIz7Yln5WdgJSp2ThD2juldYpwFcuOIUNFXC19tpTncTiudCObth6uw1c5flmGspw9zXFmd378Nk/OlWUlhvCEa9dQk9NpB/YOTDV1O7EDc4c333yTfleLb9y+fTsRET3wwAP09NNP05//+Z9TNpulhx56iFKpFH384x+nl156iYJBjgP6t3/7N9q2bRt98pOfJNM0aevWrfSd73xn2q8FAAB+HbB3YKrBxA7MOHfddRcp9eGJGIZh0GOPPUaPPfbYh+q0tLTQM888MxXDAwCASQP2Dkw1iLEDAAAAAGgQ6nbFzq045Fom5Ysy9sOvlRqxbY4lsUwZw3ZDJ5cMCYZ4/rp4kSzMeMvHeUl83sqbPfntA08JvYXdfLzOm9byeNpl2QE7HPfkXIHj9PJpGbcxeIHjO0YHz4l9jlaSIBTl5fe2Ntkpov/CW57cMW++J1dyNSVi8lxx3MiO8nmUjJfRY11CAT6Xv1OeNx2oxpoUSpMTcwLAXCcYiVAoEqGmoCxJEQlrJtrmkk5uzYKPocfYabJb09XGLbvaPj6IURNvW9Gi+PSqSMqQek0JLsFScfg9jivLT5HLB1EkY3NN/QQOy44t7Y4i7
aIrWmkmVx4voJ3b5/B4IwU5JjXI9m/oFGehLli5QOgNmxP21Jy95Z3A3AIrdgAAAAAADQImdgAAAAAADULdumJ9lk0+y6bRmm4LToGX6kNhrfF1zTJ5Uitx0n8x5cnLPvYpobdgrf6a3a3l8azQi0fZxdq+4lZPztqyefbht37hycU8HyNd04x7+Hwfj92RbuRgkP8s85ewi/XmFbJ7RcXiNH+flWDZX1NhvcDdJnJnuZK9W6npKKFN8zMWuy3CrbKcQEdXtbRKvjA55U4AmOs0xZop2tREypLux5xWdkhpTdyLNR10shm2NSWtg0yxKG1BpcLu0rJWxkTvOkNElMux3c1lOYykUlMWJdrCdjEaT3hyItom9IJ+vyc7Nd0ryNC6SGjdhKJaGAoR0eVL/L6C1pHI1Tr1EBEZxOdyHb5nsah0cy9ayG278jm+f8qVJbHi0ar981k17mUA6hSs2AEAAAAANAiY2AEAAAAANAiY2AEAAAAANAh1G2NXKhTJdB0KB+QQjaCWym5yLIRyZFxEqIn1PvNHn/HkO+75pNCLtXGcxeCpo55smfJ4qXFukTN05rgnXxiXcWY/feEFT24KcbxMoShLkHR2cGxKLCpj2E6f41IoJW0cLV2Lhd6Ktev4hcPxIyMpWT4lp8Uljub5eIaS97aQ5/iZjFYKQWUKQm9VYkK/JlQGAHB9/OjFlykYDJLj+5nYPjrKZTgyY8OeXFt5Q4+50xvIOzV1UVrak57c3MZtCAOWtAXZkZQnn3iP7WI6I+1Y95JFnmz52N7Foq1Cb8kSbj22oFu2OVuylOOIWwJsq6JBGW/oai3VSIt3K9fYfkvrNWlpx+tYXBP3F2ObWVZsxy2/UKOWlup5AwE5HgDqlWtasdu5cyetX7+eotEoJZNJuu++++j48eNCp1AoUG9vL7W2tlJTUxNt3bpVGBoAAJgNwN4BAGYj1zSx27dvH/X29tJrr71GL7/8MpXLZbr77rspm+WMoq9+9av0H//xH/Tcc8/Rvn376MKFC/S5z31u0gcOAABTCewdAGA2ck2u2Jdeekm8fvrppymZTNLBgwfpE5/4BI2NjdE//dM/0TPPPEO/93u/R0RETz31FK1atYpee+012rRp0299LleVqlXTa6qKG1q6fkVxur5hSJdDMMDL9reuY5dlwCeX04+8zd0bRi+878nFonQ/jo+OeHL/ySOenFEhoedz+H1NWqX4WFC6W9ub2RV7cXBA7KtoZQhy4+z66D/dR5LDPI4MlyQI2vJeVALsfrlc4fsSCslyAuEoX0vIZjfFeC4tjzdRDqDiotwJaFym09795Gevk237KLFgpdiuHP78v/XqTzx50QLZHaGtlV2f58+xPan9jIZbEp5cMtmWDmrhH0REn9zQ48m33nyTJ+dq7KLp46+Q031nPfnEe+8LvXcPsZ1NxJvEvq2f/0NPvvOmFZ7sV3LdYcE87hpU0lyxhik74OgdNcpalwvTrulQkWD7F9I6b7iWjDG58o1h123gEgCSj5Q8MTZWjTtraanWcjt48CCVy2XavHmzp3PjjTfSwoUL6cCBA1c9RrFYpHQ6Lf4BAEC9AXsHAJgNXPfEznVdevTRR+nOO++kNWvWEBHRwMAA+f1+SiQSQrejo4MGBgaucpRqHEs8Hvf+dXd3X1UPAABmCtg7AMBs4boXl3t7e+nQoUP0yiuvfKQB7Nixg7Zv3+69TqfTE8bOJSKX3IpcFrd93FHC0TonlEhmRnXEuRr5f/5wtye3dBwWekl9eT/Hma8+n6xS3hRhF6ZtshsgUuPa7UyySyQ/PurJIUse7/IQZ7iVS9JFEA2yS7SkZaG999abQu/isROeXKxwQ2vyyQrpjj7eBZpLOCLvrRlgN0tQq77eTNLdvOqmJURElMuXiehXBECjM9X27r7P/58UCoUpkFwu9HPjPEF8713+rM3rlBNCU3MlhoJsq0puXuitWMPHb57HIRq5Ntm94ffv4VVIPUQjW+OKdTUvaEWxa7dQkXqXLnEoy9nTF8S+cJjHO3DusiefOfye0DO1DjqnBi558oa7bxd6ixZ3ebKeMWsGa9JdfWx3Db3bhCHtsd+oXpffV5OKDECdcl0Tu23bttHu3btp//79tECL9ejs7KRSqUSpVEr8ih0cHKTOzs6rHIkoEAhQIBC46j4AAJhpYO8AALOJa3LFKqVo27Zt9Pzzz9PevXtpyZIlYv+6devI5/PRnj17vG3Hjx+nvr4+6unpqT0cAADULbB3AIDZyDWt2PX29tIzzzxDP/jBDygajXpxJPF4nEKhEMXjcXrwwQdp+/bt1NLSQrFYjB555BHq6em5pgwxAACYaWDvAACzkWua2D3xxBNERHTXXXeJ7U899RR96UtfIiKib3/722SaJm3dupWKxSJt2bKFvve9713zwFzXINc1yG/LeLGgzXEcpKW5K0uWE3FLXDJkeJjjVDJDMqg5VOasNJf4XC3NsnJ6oqvdkytO0ZPPX5DHU8RxGKbJt7dUqamObnBsXiQYFvu0ii5k6S9qSro4JY4JNLVgl3RuVOiVAhxnE+3isWdDKaE37nLMXSHLi7mtsaVCr20ijjCbResJ0LhMp70L+EwK+E06ceyQ2J4eY/ui9DIeJfnZy2S4tp5hsC0I1nRLKOe4LNLYEB9vsE+WO/nxf/7Yk0fHtfdkxoReNMbxcfHmFk+OxKS7+dw5jqtLts0X+4IxjvX72Y/4vCPvvSP0HM2mnxzgItDnsuNCb/kqjiOMx9i2xrUSU0REoTCXO4lH+D75gvI7JxyuXktJt8UA1DHXNLHTDcuHEQwGadeuXbRr167rHhQAAMw0sHcAgNnIR6pjBwAAAAAA6oe6raVtGgEyDZuCAVlqQ2llTSIhXmaPRGWD51yZU+Nbo5zmbteURSmN8ZK+a7JezieX3Ts6OHDa1dwgK2+WFeBf/QkHUpdUzpN9hqyOns/wvlg0Jvb5tRLnlsHjyBRkCYHTF9nlmkrxdRWNrNBrX8Hz9/kJrZSKkun/o8M8Jn9BcxXPl27pfK5aDiCfR+cJACaD8ZFBquRDtPcHPxLb+wfOebJZ5pCKd96pKWys2ZeKHvZhSDv28u69nuzXSjrdetvHhF7JH/XkdJHtwqm+S0Lv8uWj/J4Cn+vCwBmhd/oM691+2zqx7//u5fIvb7zGhZ0rY5eFXrrIYSR5LeTl1JvSjfyzgxc9OWKz+9bnly5WS8tOjmqu2AWLFgu9z279AhER5XIodwJmB1ixAwAAAABoEDCxAwAAAABoEOrWFeuzDfLbJuW05XciIivI2a+u1s0hV5YV1i2tSnjAz+5Hn09mz/rDnCkVj/G+gaFBoZebzy7XZPcNnnz+0rDQu2n9nZ6cGeJMsFMnZMeLbCblybYlxx6Ps2vWIHZvXDwvK7b3ndWyYgM89liHzLJtb9GOp7lzjRF5L5pH+XGYn+QMtwUJ6W4+eaSaqZcvlAlcHZ9t0v+15SaKhqouHtP0kTnRAcQ0bQpMZEIbBlEoGCBzIsP73/e9Q6+8c2pmBg1mjM5kB4XDEVq+WNbK
U9rn3zZZtmpCO0yLf6Mrl22fPyg/4+TjTNCuLs5OvWvLFqEWDWvZpEHuSnHkkOw0c+Lk+3wN8xd7ckHJNQNLC5s5dOKY2HfkBHfQCS9e5ckXLshuGM0Jfp30cxhJuEmG64wMnPXky+dPevLQsLTpBUfLMtaqClxMya/FOz5Z3ZfPy3sOGNM0adVNt3sdmyyb7R0ZBpFVtYMGGZRoTpA1se/wO69T3+ljVz0muH7qdmIHwGzGNAxa1hmj5mj1i9Sy/GSavgnZplA46pWliDaFvJZQ+95+/+oHBACAOsUwDIrFWig40Q7Tsv1k2RNxi4ZZndhN2LtksoPsiTjyM+8fmZHxNjpwxQIAAAAANAhYsQNginCVQe6E96y6Ilf9HaUUkVOZcGMbRE7FT8qsuoV0NxoAAMwGTNOkG5Yvp6amaja1ZbErVpFB6krxf4MoHAmTYVRtYSgUvOrxwEejbid2yVaTwkGTypdlynve4TiTrFbVQ5my9IatlQyJxbhch98nK7Hns1w2IOTTbkdJ3po3X33Vk5eu1Kqen5OdJ0ytG0ZYq/puWbISeyjEsS/ZjIyxy+f5daXCpVWaQvIYd9y2wpODWsmUiiVLujhlLleQ7+cYO3NcfqiSYS5xcNuKm3h7okPoHbx4moiICiV5HiBx3WoHFSIiUiYZE8ZNuYpKxYJXoaLiD5BlXdmH6vZzkdHhUSqEirRp4x1i+x2/8zueHAhwuQ7bks6WK658IiJXabF4JEt8lEtsJ/MltguXz50WeiNa/OzI8IgnnzopQwUuXGL715Ts4h0BaVsMP8fYlSoybvrlfa948qJlaz25u6WmQ4XWySeslWopFmTniVNpjmdu0uyio6S9GhjNeHJb22JPzpXlZ3DvvjeIiKhcRqedD8O0LFq/YSO1tFbLjhlkkDnxQ9Z1FTkTXTuUUlQsF7wOTbFo08wMuMGBKxYAAAAAoEGo2xU7AGY7SilyJ1bgXOWQ61ZXDAyj6o5VisigakFZTw8rdgCA2YYiqlTKXg9jkwwyVNUloZS2YkeKlHKqbyAigxB6MhXU7cRuwQI/NYV8FDfkkv7JfnYfDGpNrEuOdFM2NfGlZXNcFsRxM0LP0hYtR4bY7Tuekcv2hTIfw1IsR5tkSv7gALstzmXZ7ekqmSrf0c7uYcOVZUNGU9xRIhDh60rEo0LPr7ljipqLhWzpbs4WWa+U0TpKuHLB9obuTk/u6uTx9Z+TZQIuD1X/BsUyOk98GEopKhQylLeqhq7iZMma+HsZRJ6bgohobDRFauL5GB8f/8CxQOMTDgcoHArQ5bTsLvPWOwc9OZlkW9ORlJ12ymW2IaOjKd5R063G1mzN/CXsOu1ulrbl/Anu3pDNsOs02dEp9MKtCU+2guz2zOXleefNW+jJAxfOiX3Dl9mezuvi+BqjpldvpqjZSZvtYtmVdiighbkEtLIwpctDQo9MtoUdWqmWUlG6XK8M47doHTxnUcql/jOnKHUldKpSIXKqfxdDKbLUlR+1BrV3tZFlV0MEDKdw1eOBj0bdTuwAmO3o3wPVhvLK267IJYMMUqoagyL1AABgduG6iu1XNcC4KiuXXOWSQUTKICKlCBUBpxbE2AEAAAAANAh1u2IXS/ioKeyj/FBObG9OalleEc60Gh6UmVaFEi+n2352EZRqEptczZ1YdvgYY/lRoRfRMlILOV4+zhdk54mSdjxHk5WS2WmZNF9XLCYrp8di3A0jn2e94ctyTE1N7HIwtKw4oyJXffw2H19PVvPXNMVefMNiPq/W8Hr/fllE8p0T1UbgFQfxYL8O13HIueKOMEz+NauIyL0Sf0JUcSre8p7jwr09FwnYLgV8LhULKbH91Vf3eLIqs92JhaXNKJc5dKSgZdXbNb/dFy3u9uQ1m1Z78rKFXUIv1c/u0oFRtnH+msz8Za3smh0a4jCXtSvXCL2b1q705Gf/338V+2ziLhJlLXylVJJuOlXRPhtBvl4rIMe0eMlST77Uf5x3mNLehbQwl1WruMJAISfDdbrnJYmIqFiE2/DDcCoOvfnqfgpMfMEYStFEBSfy+33UnKh+B1uWRd3L51N44rvbF/Bd9Xjgo1G3EzsAZjtKsWvCdZXXaUK5RGqinZFSihzdFUuYLAMAZhdKKRoeGiTfRHy3SQaZEw7XQDBAllWNr7Msi0zb9CZ0epkeMHngroIZZefOnbR+/XqKRqOUTCbpvvvuo+PHjwudQqFAvb291NraSk1NTbR161YaHJQJHX19fXTvvfdSOBymZDJJX/va16hSQZ09AED9AHsHpgOs2IEZZd++fdTb20vr16+nSqVCf/VXf0V33303HTlyhCKRqqv5q1/9Kv3oRz+i5557juLxOG3bto0+97nP0c9//nMiInIch+69917q7OykV199lS5evEj3338/+Xw++ru/+7sZuS7XVfSrsykKT7i7Tcsk02D3q1ZDthp0POGLvTQGdw8AjUqj2jtFisbGx7xC6yYZXoKEnfdRya2GOZmmSa/8/FUKBqsu23Pnz8/EcBseQ9VZGl46naZ4PE5v/c//StGwjy6feVnsH9NKdKTzPC9NXZYurPSoNmd12j0xEpTp+o7eyaKY8uTxnOx4Edbi1JrCHANXVPK8Oa2LRLnI+wwlF0cjAc4LamqSJV1srVxJ2eEU/4uDNV0utPiEeILjCG2/X+ppVdqHszy+8bQsrfGpzbfzPq2tx3//Hz8UeoMTYX+uq+jsaIHGxsYoFovRZDA0NETJZJL27dtHn/jEJ2hsbIza29vpmWeeoc9//vNERHTs2DFatWoVHThwgDZt2kQ//vGP6fd///fpwoUL1NFR7ZLx5JNP0l/8xV/Q0NAQ+Wvux9W48tyB2cNkPnczxZXn7v/5zhMUDoVoMCUn9heGOb7NLfFn0irL1RlXs2PK4lgyy5bPflCLS563ZJ4nR0jGdo5oJY4OnePY3ldfe0XoXR7iEiJLl3Ac3fo7ZAeNiGbjfvwfPxD7VJm/gjq1siOmJdcdXIev2a91CbL9Mk5r5UqOsTtz7G0+jyM7/Lxx8C1PvvljGz05r7c0IqKuZPX7o1Qu0bP//izsHZgRruW5gysW1BVjY9WaVi0tLUREdPDgQSqXy7R582ZP58Ybb6SFCxfSgQMHiIjowIEDtHbtWs/IERFt2bKF0uk0HT58mK5GsVikdDot/gEAwHQCewemAkzsQN3gui49+uijdOedd9KaNdWsuoGBAfL7/ZRIJIRuR0cHDQwMeDq6kbuy/8q+q7Fz506Kx+Pev+7u7qvqAQDAVAB7B6aKuo2xy2ZsMlwfkSWbBDdF2FXhC/ESfqSm6XQ8zq6JTDqvyTIINZPTyp0UWI76W4Ve0MfL/ZUil0WxbTk39msvfVrTbsOQemGtM4ZZ81eo6C6HEO+MJcJCb2SEXanjmks41iLHnqtwjZf3zrCL+di7/UKvo4WXeTsWaOcypbu5baIDhuO6dHZ08mLCent76dChQ/TKK6/8ZuWPyI4dO2j79u3e63Q6DWMHZoxIxEfhsJ/iNYEx0XYuw1HU7E6w5je532D3mwp
p5Y3C0i3nFriUx/g4r9pYYeniSS5LePKyMLuD3zv9vhygwTbOF+aQj/MX+4Raa1vzVWUiolKeXZ/FInehyGalbSlqZUjKRS4DZQelXezo4tCbsxfZ3g/2ybEXMnyu9w+/zeNrbRd6qrm6mqbKk5uxDnsHpoq6ndiBucW2bdto9+7dtH//flqwYIG3vbOzk0qlEqVSKfErdnBwkDo7Oz2dN954QxzvShbZFZ1aAoEABWrqXwEAwHQAewemErhiwYyilKJt27bR888/T3v37qUlS5aI/evWrSOfz0d79nCh1uPHj1NfXx/19PQQEVFPTw+9++67dOnSJU/n5ZdfplgsRqtXryYAAKgHYO/AdFC3K3YX+onCQaJiSrpYo+3spgyGOGM0Lj221NLCl5bJ8rJ9KiU7WYxe9msyb7dcWaXc1ZKHr3QTqO6Q2WT6TNkwOfPVsuWtzjusqWrKD/m0Rt2V3AifNy/H7mjZs6kM7yvVNC8Y0VzRZ07yRaYuy+yvUpbf2BnnX36rFs0XelcOV3Zc+uWZEfoo9Pb20jPPPEM/+MEPKBqNejEi8XicQqEQxeNxevDBB2n79u3U0tJCsViMHnnkEerp6aFNmzYREdHdd99Nq1evpj/+4z+mb33rWzQwMEBf//rXqbe3F79SwawglzlJ5ASJXPlb22ewYRscZNfhe0fOCL2glrXvjyc8uS0p3Z5dbZwJaWvFYVvjMnxDbypT0LrwJJPSZTu/q8WTL2rxXSdOHBV6i0s8gdFdykRE4+N8Xbkcu07TYzLAX3fFOiW2aVYgIvQOH2rz5FKRw1CSSRmXNv9m7o6RbOd9be1y1Ss4cfzCJHSegL0D00HdTuzA3OCJJ54gIqK77rpLbH/qqafoS1/6EhERffvb3ybTNGnr1q1ULBZpy5Yt9L3vfc/TtSyLdu/eTQ8//DD19PRQJBKhBx54gB577LHpugwAAPiNwN6B6QATOzCj/DZlFIPBIO3atYt27dr1oTqLFi2iF198cTKHBgAAkwrsHZgOEGMHAAAAANAg1O2KneNrJccXoLL/drG96HJ8hlnhNPxg3BB6iXaOzWs2OYitJSdT1lMjHJuSGua4unxW3hqnopUN0LpIuBV5vEKe4zD0CuCWLWP2xgv8vnxGxm74FMeFRM0on8uUMSflMo8xEOFfgkGfjLNI+Pl4SynhyWtvkbEpK2++xZMX33CDJ2/YJGP7zl2oxroUSxWiX54hAMBHQ5WK5FpEZs1vbbvMdiPmY5tx8LV9Qm9gkG2hoX3+N2xYJ/Q+3sP29EpxXCKid375utDLFtgmnejjskinzpwRevkc2wal2AYHY7JkSFrrcjM+Oiz2ZdMcw6dbcduSNj0e5bImXVrSQXPrPKGX7OIYua7b1npyS0zaO7/eoUOT9RIuROTZe70jEAD1DFbsAAAAAAAahLpbsbsSg5ArVFeZ8oWS2G/4OGPUdXklzszJX3d2lvXI5GzPbF6usGXzrJfTV9EKMhbCFZmrv2bFrsjHc7RfsJYjU1XzRT5+oVQW+5Ti17a22lgoyfTZov7S4ONZSv7iLGp9JUsVHoevpt9kTrvXGa04aL4ox1ecGMeV49ZZu+HrohGuYa7RCH+zK9eQL1Q9EeWa39oV7bNcKLC3wnGl3dGz9g2tWHm5Ij/jBS0jtahljBZL0s6WNJtU0Y7h1pxXaa/1FTu3plqAq/WiVbXH+JC/Y+1m/dx6ZYJKzTWWy9p1addbKNZUOjCvbcXuSlZsIz13YPZwLX8zQ9XZX/jcuXOoiD3L6O/vF0U2ZyOnTp2iZcuWzfQwwDXQCM8d7N3sA88dmAmu5bmru4md67p04cIFUkrRwoULqb+/n2Kx2G9+YwNzpf1Lvd0LpRSNj49TV1cXmebs9uqnUilqbm6mvr4+isfjv/kNYFK5lme8kZ472LsPAns39biuS8ePH6fVq1fX3X2eC0y1vas7V6xpmrRgwQJKp6uJArFYDA/dBPV4LxplEnTlAxOPx+vuHs8lfttnvJGeO9i7q1OP96KRnrv586uF5+vxPs8Vpsreze6fHQAAAAAAwAMTOwAAAACABqFuJ3aBQIC++c1vovcd4V5MB7jHM8tcv/9z/fp1cC+mB9znmWOq733dJU8AAAAAAIDro25X7AAAAAAAwLWBiR0AAAAAQIOAiR0AAAAAQIOAiR0AAAAAQIOAiR0AAAAAQINQlxO7Xbt20eLFiykYDNLGjRvpjTfemOkhTTk7d+6k9evXUzQapWQySffddx8dP35c6BQKBert7aXW1lZqamqirVu30uDg4AyNuLGYi8/cVDNZz3RfXx/de++9FA6HKZlM0te+9rUPNH6fzczFZw/2bmaZi8/cVFNX9k7VGc8++6zy+/3qn//5n9Xhw4fVl7/8ZZVIJNTg4OBMD21K2bJli3rqqafUoUOH1Ntvv60+/elPq4ULF6pMJuPpfOUrX1Hd3d1qz5496s0331SbNm1Sd9xxxwyOujGYq8/cVDMZz3SlUlFr1qxRmzdvVm+99ZZ68cUXVVtbm9qxY8dMXNKkM1efPdi7mWOuPnNTTT3Zu7qb2G3YsEH19vZ6rx3HUV1dXWrnzp0zOKrp59KlS4qI1L59+5RSSqVSKeXz+dRzzz3n6Rw9elQRkTpw4MBMDbMhwDM3PVzPM/3iiy8q0zTVwMCAp/PEE0+oWCymisXi9F7AFIBnrwrs3fSBZ256mEl7V1eu2FKpRAcPHqTNmzd720zTpM2bN9OBAwdmcGTTz9jYGBERtbS0EBHRwYMHqVwui3tz44030sKFC+fcvZlM8MxNH9fzTB84cIDWrl1LHR0dns6WLVsonU7T4cOHp3H0kw+ePQb2bnrAMzd9zKS9q6uJ3fDwMDmOIy6KiKijo4MGBgZmaFTTj+u69Oijj9Kdd95Ja9asISKigYEB8vv9lEgkhO5cuzeTDZ656eF6n+mBgYGr/m2u7JvN4NmrAns3feCZmx5m2t7ZH2HsYIro7e2lQ4cO0SuvvDLTQwFgUsAzDT4MPBug0ZjpZ7quVuza2trIsqwPZIkMDg5SZ2fnDI1qetm2bRvt3r2bfvKTn9CCBQu87Z2dnVQqlSiVSgn9uXRvpgI8c1PPR3mmOzs7r/q3ubJvNoNnD/ZuusEzN/XUg72rq4md3++ndevW0Z49e7xtruvSnj17qKenZwZHNvUopWjbtm30/PPP0969e2nJkiVi/7p168jn84l7c/z4cerr62v4ezOVzOVnbqqZjGe6p6eH3n33Xbp06ZKn8/LLL1MsFqPVq1dPz4VMEXP52YO9mxnm8jM31dSVvZuE5I9J5dlnn1WBQEA9/fTT6siRI+qhhx5SiURCZIk0Ig8//LCKx+Pqpz/9qbp48aL3L5fLeTpf+cpX1MKFC9XevXvVm2++qXp6elRPT88MjroxmKvP3FQzGc/0lfT/u+++W7399tvqpZdeUu3t7Q1V7mQuPnuwdzPHXH3mppp6snd1N7FTSqnvfve7auHChcrv96sNGzao1157baaHNOUQ0VX/PfXUU55OPp9Xf/qnf6qam5tVOB
xWf/iHf6guXrw4c4NuIObiMzfVTNYzfebMGXXPPfeoUCik2tra1J/92Z+pcrk8zVczdczFZw/2bmaZi8/cVFNP9s6YGBAAAAAAAJjl1FWMHQAAAAAAuH4wsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBAwsQMAAAAAaBD+N0XSy/Q4P/1ZAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAACbCAYAAADvEdaMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABId0lEQVR4nO29e5BcV3X/+z2vfk4/5j0azehh2ZJl/ALbksfmR5yga2FIwMHUL9yiAqQoKIjELaMUSVyXQIVKlerHzS0oiMB1U4lN6sYF5fyueTjG+TkykXGQbSzsYMl6WLbk0WtGo5np6enXee77R3fvtXdLspE8o2m11qdqanb3WX3OPqf3Wb3PXi9DCCHAMAzDMAzDXPaYS90BhmEYhmEYZmHgiR3DMAzDMEyHwBM7hmEYhmGYDoEndgzDMAzDMB0CT+wYhmEYhmE6BJ7YMQzDMAzDdAg8sWMYhmEYhukQeGLHMAzDMAzTIfDEjmEYhmEYpkPgiR3DMAzDMEyHwBM7pqPYsWMHVq1ahUQigY0bN+KFF15Y6i4xDMMsCqzvmHOxaBM7HnDMpeaHP/whtm3bhq997Wv49a9/jZtuugmbN2/G6dOnl7prTIfD+o651LC+Y86HIYQQC73TH/7wh/jkJz+JBx98EBs3bsS3vvUtPProozh48CAGBgbe8rNRFOHkyZPIZDIwDGOhu8YsIEIIzM/PY3h4GKa59Iu/GzduxG233Ya/+7u/A1AfS6Ojo/jiF7+Iv/zLv3zLz/K4u3xot3HH+u7KoN3GHeu7K4OLGndiEdiwYYPYsmWLfB2GoRgeHhbbt29/288eO3ZMAOC/y+jv2LFjizGMLgjXdYVlWeKxxx7T3v/kJz8pPvzhD58lX6vVxNzcnPx79dVXl/w68t/lN+6EYH13pf21w7hjfXfl/V3IuLOxwHiehz179uCBBx6Q75mmiU2bNmH37t1nybuuC9d15WvRWEC8ZcNG2LaNublZTT5uRrLdHROyPdKd0uT6euh1by4t2zHT0eSseFJ5YcnmbGFOk/MDOlY+l6NzC339fDw6l1qN2olkXJMLEcp2tVrWtmVzGXohSM7z9GNZoK/PUvrele7S5NIpuha2k6D+uZ4mJwzlacCkfXueLhcIQ37+r779z8hkMlhqzpw5gzAMMTg4qL0/ODiIAwcOnCW/fft2/PVf//Wl6h6zCLTDuFsoffc//uERJFIpnDz0siZ/5s2Dsh2GdE8OjKzV5EZWr5Pt/OCIbCeSuoo/vP952R5/Y69sByVdB1nKsTL5rGzbcV3P3nL7HbJ91dXUp1pR19v7X/2NbEeRrk/8oCbbB/a/Ktvzc9OanKpbA1/R1TNVTa5Uof0FIR2rr69bk8t30+9CJEr0mUATQ61a/458P8BT//ZMW4w71ndXHhcy7hZ8YrdQA862bdi2rU1YAMAyadnYtmiyFXN0ubhDp5aI0WQuZukTOzuuvLboM9WYLmeadKyE8hkz1MRggCaeiGhjomV/oeLeGIX616DuH4LkTAhNzgLJqdcpGdePlUzEZNtxqN26An++iZ3VItec2NF+Lr+l/AceeADbtm2Tr4vFIkZHR5ewR8yF0g7jbqH0XSKVQjKVRjyR0N6Pxeh+VSd2rXJJ5eEtpTzYtU7sEkl6kI3H6WHTbH1oVI+lyNkJ/QE1labJUZfyw2NH+v5SKTpuFOm62vPpe4zH6XzdFp0pFN1qgPZh2/qxbFs5Z4N0sOPo1yKm7D9U1jhah1UY6Hq3HcbdhcL67vLnQsbdgk/sLpTzDbgDB/bDME0UzpzR5HsUfWb00ou+UJ/NGknybSlHM7JdCvWbVBikSCo1erqrVF1Nzg9JqZxRZjoJW99fEJCcZZ5bOdaPRU/IQcsTrFHrlW1T0YG+q/cpadP5l5TVt5lQf+RMpUj5GsqKpdEyyYViv6/USFkGfovSt+vn4votj7ZLSF9fHyzLwuTkpPb+5OQkhoaGzpKPx+NnfScMs9icT9/NF2bhuy568z2avOinCaOwaeVs2YqrNLlQmUiZUUW2o4p+j9ZmaRVMVGlla3mf7gu4YvRq2R69eqVsDy8f0eQGBqh/jkP3U5DXV/ZGR+geDAJd39VqtOJWmKWVszNnZjQ5O6Yqf1KM3b36fZxI0/7mlJXDeEL/uYsEXRvHpn0U5wqanOfWdXzA+o65TFhwD9CLGXDZbFb7Y5gLJRaL4ZZbbsHOnTvle1EUYefOnRgbG1vCnjGdDOs7Zilgfce8FQs+seMBxywV27Ztw9///d/j+9//Pvbv348vfOELKJfL+JM/+ZOl7hrTobC+Y5YK1nfM+VgUU+y2bdvwqU99Crfeeis2bNiAb33rWzzgmEXnj/7ojzA1NYWvfvWrmJiYwM0334wnn3zyLP8nhllIWN8xSwHrO+Z8LMrEbiEGXMI2YJoG0OIWsFLxq1s1SNGpA/26b0pS9StTnA6rbk2Tq/lKhJoiF1OcjAEAigOtiOgzuR7dlyTw1YAO2kfYEmRhxejEXE/vkx9QP1KKnJ3W+5RQtgUG+eyZItLkAtD+1ECIrrTe91KZfHP8QPHZafHZnC/WI4Y9v+Wk2oCtW7di69atS90N5gpiQX5gfR+wfXiu7s9aqZA/2qq1y2W7VNajWD2fdEhPH+lF29GNMtdcQ5Grd9x+q2wvH9R953K5fuqaTfd5qiV4QnUxNpRw0mq5pMm5ip9uKqnrne48+fetueo62d6//6AmB4P24bqkq3JZPdpViQ/DXJFM5AK6b18UUednZ+l6Viu6L3Mz02sQto+PXRPWd8y5WLTgCR5wDMNcKbC+YximXVj69NkMwzAMwzDMgrDk6U7OR8IIYRoRMhm9i2uX07J7b5JC3p1IN2eWZmjZPYxo/lptCf83lWX7bJ7yP9kx3eRQmJunbUqXejK6WWG+SEv6npLSpFrTTSxCMY92KbmgAMD3KFzfVPJJOS3h6qGSHNlWbKxuizknptgmzIjO3y3pSUShpIKJK2lWgkg37c6V66YKL9DfZxjm4ghqNQSGASPQ3RviMXK/mFNSP/UO6abTFe+i9CQDo8OyreatrB+IdIOWGPiUngy48sYUyZmkSw++8l+a3G3ryXT6vg23ybZoqVRZLFLC9/E3T2rbYkrS9FiMooT7+pdrcuPHXiO5BOndUkuC92KRrpPtkF7MZnVdXa2SOVe1sgYtek3m1tNPiWHaFl6xYxiGYRiG6RB4YscwDMMwDNMhtK0pNh+3YJkmki3mx5wSGdqfVUrCRLoJQ31l2Ypd0dTnsq6SsV0tRWO3RJaGLplHhUX7OH26oMspkaLzFVrqr4R6RFZXUklM6up9t5TSOaZB6/9WXC8jVC2TKSXlKPUcW8wgNaWiRlXJnh612BYKJdpfoULXpdSavd6vn38QsimWYRYCt1qBISJ0JfV7PNtD0anvuelm2R696hpNbl6JSD34xjHZLio6CABKh
YJsTxfI/HpqQnfLyCpRsTApSvTxH/5PTc7576QLf2fsvfS+o7uDDA2ReRhCryZUmCU3l1+/RDVlbUfX/ekM6bhAcRvxSgVNTlHP6FeyJYQtOnh6hvphQqmnbes/i/l8PcrYb6nAwzDtCq/YMQzDMAzDdAg8sWMYhmEYhukQeGLHMAzDMAzTIbStj11fLgHbMpFxLO39RIJemxb5WSRbKkX4StqASEktIoTuZ+EpFSVCj3woItGSnkTxzxA2pRCY9/RQ+zCk/lUUH7RWf7T5Mu3/xIy+D8ck2WyJ+u5P6L4p1Tnyn1nRp6Q7GNBTIRgZSjXgzpJfTamkH3dunnzszsyRT+HRY3OaXGjVh00kOP6fYRaCeNxGPO7AtzLa+9UkpWA6UqR78uVnX9DkZqap0sOJk1RtwbH0sjGqbnED0mmqHy4ALOunn4bTE2/Kdjaup0+ZLxRl+9CRI/T5ZX36cR3a37LRIW3bsPJ6fIL8Aw++ckyTG1hGfn9HxxVd6Ou6NfLodahUzUi0pLCK2+SjXa2RXDab1eRsu/45EfE6CHN5wCOVYRiGYRimQ2jbFTuGWQoss15b2GjdoNQRNrRo4lZJpabwWyxoqvWLz6LxwbdaDxUCEG8lIZrH0fuoHlVdcW3dk9Y7pa+mYcAw6HlQTUTbcij1Y5pca/Jacb7LKc7ZhBACES8WM8w7JhaLwzAM7R4UEC336Pm0gzjvParTer+f+/4/S1e1qEhD29Yi13htaNsa9ebP8ZlWQqWYexSFLTqYzle7TmcpeEUXvpV+Pw9n7c6g40QXmIGibSd2Q30pxGwL2ZieaqMrRaYAQzOX6lfFUNKVuEqGcbPlh7g3QwWz02lKNVCc082eOWV5fl6pIvHmCV2u5JIpNqZ8F8tT+qW2HcXUOV3QtrlCqaihpDvJZXUzzR3XURHv4ikamKKiX4tcH5kc3Ar1o1TSF2zjDsmNDtGxBgb0YuaTxbrJNggjjO89jk7iwxtXw7FNJKwItlm/jrZlIB5rXhuBmAjRHG+2acNopNARQiBSXABqbk3e/GqlEdO0YTUqChiGgUQiJhWBWyvD9135mRCKad8L5M0/X6mgXK1/D6EwZAoaALAjUkUZx0DaNuWxLMU0d2zeh9twRQhhIAQpRwu0j1QqCauRQ2Iw34eR/nrqChEC5ZlI3nrpLiGrsjjxALF0/T6JogizxTl5LeZLNfiNtEB+GMKVKXgMGFZMHjiIDJn4x/UjFBv3XRBGeOHQBDqJZHIAyWQKpwu6vjt8jMyRr+7bK9umo+uTUKk2U50nFwvL1H8Qqi6ZTgvz1J4vlzS5o8f3y3Y6Sbpg3Zp1escVc+5//uI/ZHvl6tWa2Np1a2W7tzenbYsn6FxyWTKXmoHuAlJ21QpClIKlWpjX5MKQXEoSSdJppaIul1XSp8QVFx/P091wKo2UMb6vfzedwJ/9n/8DiUQS85UQnl+/Pyu1Gk7PzDQkBMKwCDTuRMOypK4SQiBSrolbIX1nGkI+AEcihOdXGnsDypUKosbvs+t58BvVUCzbRirTdD0wkIglpW5NmTYSZv07ijk2BvJZ+SDZ29sNp+Gylc3mkMnVx1csFsPIyHKpT7KZNCzLahzLgNXQi7VqDfv27pV9P7zvVyjP18eeLwx4UX0HQSgwMe1KHewVZxE1xr8QPkRUH5OGaSLdlVQmdyFdP9OUfRAC8ANT/jaEQSAftg3DgNU438AP8O///p/n+QbPTdtO7BhmKUglbMRsC0k7hNOY2FmWiWS88QMhBGLKs6NlWvIGjKIIUUiTvJhhy1U19cnPMC3YMUfe+MmkA7OhwKqmB98LG59pTuwMCCFgmvRUFwaWPFb9Yc6o/wnAFuRjkXRMdDn0g2jZRuMJXSBhmzAaDw6BMBDKTwnYoCfmhGPBbuSCTCdiyKfrOb+iQMAqR/IzXUmg+WxgJ3wkuuqfCcMQoU8/sCII4DcmmH5gwFafLyxbXhc/Iv9Y0wzhNZbpOH8iwywM+XwPkskUzFgItzGxMysVlBp+i0JEiEIBqcEskx5kowhR4wFNCAEDtlRQhhGhuVgWRgFMnyaDfhTJCUxkmICcbNlwEjQhisfTUi8mTBuphm93zLaRyWSkXD6flz6cuXwe+e562dFYPIbBoSEpl8t1ST1mWQbsxmSwUqlgcnICQtRXKqeyGdhG/by8CHBFvQ9+ECFZteV5WH4VUeOBOopMWnk0DaRSCWViF9D1MwxYdnMfgBeYaP4u+H4gJ5f1h3BLvn+hsI8dwzAMwzBMh9C2K3bdXUnEHQu2V9DejysmiFScsoW7VX353FeK3efz3bLdahf3Qprb+r5SyaGrS5M7OUVL/6+/SSaCqXl9Nq0WaViZpOX9e//bzZrcyDLa/7/seUPbtvswmZmCiEwdTdNgk/kCFequlKh/mYyjySEk81siQdtiCT3iOGXQtkCpir1CKSoOAJmZuknD80M802Gm2FTMQcyxEEMIs2EusA0DtpLOPmbZcs0uisjfKwIQRI0ldxhItERqq95ooumnZgBRFCBqLPdblgmjUW0lCAXKill9esZF0PAFKVU9lBsVSyJhoBoY8gg9dmP1DsBQdxZrB7sb+zaQy8Ya2wRS4zPSrWCm4mG61DQBi7qdtYFpCFkBJQo81Cr17z8KBTyXnjKTSRNm4ynVCgXQuBYQERzHkk+0tm1KU4wwTMQMS14fobhQpLNp2I3IxZofIFau90+NeO8U8t29SKbSOHzskPb+qaMUaZpy6B6fK+uVIkrF07JtRHQNC/O6ibVQJR1nx+l+7xsc0OSSiovK8lU3yfZoi8448l+7ZdsySFf5of4dTZ2haPwbblivbbv6mqto/0rka9ft79bkfnNgXLbdGrnNuE5LVCzIxBoJ0mMTEyc1uZhS1SjXrZ6/ni2gWq27zXSiKTaZTCOZSmNmfhbVWn1sVMolFM7UXYwMCNhGBWjc/37oySpPURjAq9WvlRBA5Ptyxc4LfOm3FkYRKh6Nu0BEkK4sMUeuYBmWgVii8ZtuALneYVhWfYxmbQvphg4WgY/Z6SkpF48ZcsUuDEN4jQohtm0jUHTFyMgyxBtR3emuFDINs68BA93ZrobeA65aMQy3Unc/KJZrmG5Eo/uBQDlNeswTHqKGGVnAR9TIuGEYgO3QSlyl4iJoyJmmCatpcTAMxOIZKVc3ezdXSgV8v76/ixl3bTuxY5ilIB6zEXcsmL4Bs6F8LAOwlFJ0tkNmVN8LGqaKhtNxQ0YAiMdicqKjPlBEAgik+VQgiiJp9jRMU04iQ0QIwobiEECp7MtJTakWotownYRCwA1pMpeLG7KsUj6Zwqr+XgCAY5sY6EtKU+zJ+QCFal15BFEJc5XmsQRCqBM7SLOKiEL4DSUdhQKBTz/mUWhB2E0zDQDR9NkT0kcPACzbhNUwbwgDsJvmaiEQKlER6WQMiUT9Bzzm+vLaen7nTewYZimIJxKIJxKNiUT9/ndrNVRLTX9EgaTtSf1UrZXhN1J/BYGLWqWxyCEEIMiTuFKrwWtMSPwwRFnxW7Ri5G6RznQh2VysMQxYUrca
SGa64Tj1+7/LNpBt6Ba3UsLsCVoMqWSTiDV9oA1T+uWapoUgjOSxMpk0ko2SfbZtQ67dGAbSqUTjNAQG+roRuvVJvzNThNdIn+MFEVJx0XgAFbCjJEToNK6SDwFH7kMo+jMMIzk5M03yqTMME3baksFoobKYEgRBPYgDkP8vBDbFMgzDMAzDdAi8YscwCo5tw7EtGJEFNEwJpkHL6gAaq0pCtsOG6UtEkE+LBgxEkWguWkFNDRDJfdQlnUZAQ12KVv0iAVRrvnzfDyL4gZDb5POxYQCNpz4D9ZQtVsNs7xgRHKP+JOjAhBn4MGBAQCBh2mh6CySdOBK219i3gKuYMERE4fau52G+ESUoQgHX9+V5JYMYombUbWTBicjRWIsKtkxYEUXqqtkTfCUwwkAE0XCpsAyBhNM8R851wjALQbU2DxghKtUiytVS470SXJfM0cJ10bzJg4hMsQaAVGNFHQaQiicauhIoVV24jVU6P4xgVclMGU8mYDRMAJlcFql0GgBgx+Lo7msmqzaQSHTBbJhiLSOEEM0VrAi1Gpl250tlaYqNDBtmo4CA4ziIxequJwbqeiwMaB9CNa8ISvFSK5XgNt1NPB/JRkSYYwG5rC0/NxdWEHh1/RSEQllxE/IaAUAUhtLEGkUCUUP3G6alWWuE0gchhDRlR+GFr9i17cSuv7sHiZiN6kxNe980lHQdFSXE39Pt0LahpIlQTDetS5RVxZSU7ybfDC/UfzzeOE7+GTNF5UfP1jOxqyanbILkBmw91D4xQ/4y12T1TOynemgfkwXynXErenb4lw6RP44Z0A+in9YzpyOnpCsxldQCuZQmllHMYDVl6Vx4RU1uVX+6IdN5Pie5rgwSMRteKUDoNaLELBuGMnJqLn2vXhDKSZoBAaspZxhwFTMAGUrrPieeMoGJpbpkvqW6a1pDwQQRTp2pX3shBOZKIZofC01D+S4NWGZ9umMYQDIWId44WMbxkDfrPiIWDMSKXl1IAIOxLnQ1jhv5cbherNG/ENPzDSVjAJEfIWqMr+lqEdMzjfEgBIyQpmxhMoVEWO9T2kgCyeY4FIBhofnjkEjEEIvR/Rkp485VIsNswweC+hhLWBYyubp5pBPH3ZEjexBPJHDg9cPa+ydPvS7boZLGJJNLa3Lrrlkl29evv162T01VNbk3p2gf/UOkF1au0dOTZHrJ52xyVvmRP3NEkxt/k/zepgrkR7f+Ok0M/9ta8qsrl/Q+qZYm4ZGO2/fcbk3umnU3y/bg8rxsP/fCM5rcxCTpK9U/qVbV9efsLOnkZBftLxK6z165Uj//oAN9OydOHUI8kcCxk6cwM1c/z3KpjOmZup+3EAJRuSIf3hLJOJxY/R5PpeJYPlwfQ5Zp4Zo118Bu+MvNlXxUqvVr7/oRJgr179wwDPT09cJsRHz29Pch00jjZTkxpLJ52bfpYlVGwFuVWaBaAFB3fzk9Sb+Lc+V5mI3f3b7+QfT314/b1dWFa6/tkccNgkhWnvK9EM2vOQoFhE9uKKeOvIH5mboPX7a7H8ub6Z0MCz2DGflY+eqBEorF+v6q1Qjl5gOvENpYcV1PjsMgDGXbsix0ZfPyWkQiko/1QRig5jbSil3EuGvbiR3DLAWGQatnehLMs33lzrODiz5u4wC/5T6UlS75DgVQqLs4O9EnBXKc6xzV12+ZBPlCOc/1bL2250vuea7vhWGYpeNcOuPt2ufbZiivz60L3qFuvUjImkKv305XLTU8sWOYc1A3EaoTiWb7LT8FPVE2GQ21LeeYdUmrgLJid3YG+HMfqd47qRb14g3K/mDUTbiUVf3c+29aR0Vzb8oEr/UMm/uh4ylb1bYhzv7gBaKaKRiGeedEYVg39am3KvTJVj2Cv2G9ME2ZW86yqG2aZqNiDwUGSMeUVv35lkq0pQpG84U4W1eJxkNwvRIN6QapP1vkTVN/aD9LjzVz60X1gDagkauv2TaksQOGYTSq8Jz7d0E/tnGOlvoh6u+5KvRcjL5r24ldvrcPybiD7i49ZYRpUoh+oUgh/35L5nRTLRECWloXLRnbu7oobN4Htfe/oacdKCs+B4kEhcknYvr+kmkyb3ZbZAbYc3hSkws8+pyb002x/d3UD0MJ3fcD3Sxd8cikoabF8ALdVGUo5mZ1ZDmmPhqFqVS8sKl/gevqcs0o0LDzfmAz+W4k4w5EOgE0fCYMmDDMZkoOgVK5RGk5XFdebwN6qgmhjLvQMqVfiWGYSKVoHEdmQibinSpMo1AsNnbtoap8x7bjwGo47dm2ISN1DdNETBmT3VYIu5GeoFj1sW98utkhiMBuNlFJ9iBsmHOFKdCTrZtig8iGgS5Khhx58nyDKITfOF8hgFqNxkAQhTAbt10YBECgpCBShpoN8j0ULf6LtqWYaAMlYacAOTB2YH7iX/3nz2E7NuxBvbLDmvU3yHZSKW6//rprNLl1a0dkO6zRNRSmbvYsgyrl2A7pGcvKa3J+QOOpPD8j27kWM3ig6IDx06SPE10nNLlctlu2r1qzStsmFDeHaoGqBB14/mVdrkrnf/3mD8j2DTdepclVXyRT7OuHj8p2KqWnsMrle5VX9HtRVH5XAMB1633qRFPsKy+/AMdxECZ7YcXqv13pfA6jyWa6G4G4F8mHtKFlA8hm69cxnUpgZKSvKQZTxOXELpbwUWrk/qr5Pjyz+Z0YcGKUvNeysjDNuinWMGwEYV0HCSFQrcwjaOiauBsg3nCpivxQGXcCU3MlNF/6wkGzCEufH6ArnW24uRgYWjaIeCPFT92c3HiojgzU5mty8nji4BFMn6pXfOnuO4PKTD3y10kkseLWDTJB8+rVA6jW6n2fmp5BdKxZGSfAyROTckJoWjYSDV9BwzRhNn5LDNOAYZJiq9XKsupJEATwPDbFMsyCYFlWPeO3ZStPVAbMxgQoElE9ZF3QJK15o0MxK9angMrqmRLggMZn6KnRQNP7MwiFTGnih+RzUX84pf2bRt2vrr47A7Z8GhUwTQNWQy6MBLzG/kTUnKvWjxvEI4hmyTqD/EMFhJw0CiEAQXn36udf/0gUQZuw6Q+WQn86FSTbfOIlaNXSVN4XhtG6U4ZhFhCvVkMUBjDjkXzwNGHCafrACoGYci8nE0mkUnX/zlQqLttCAJFLD2mWVc9XCQBWBJmPDmg8KDeDvQwThvSHNxt6rjnhUmqzqsWhW1b0gpCWboIwlLk+wzDSVhgdx4bj1CeOzcles+8iEnJi53s+fLfhi+d6CBp+n6ZlwTIB06qvEjqOjVA0Joq2Jf38jMhsBGc0tbsh+2CYpvRDbLUs11fsqOKHYsfBhcLpThiGYRiGYTqE9l2xM23AdGA4znlF4koVhRT0KDFbmbOaSnJZv8WOE5dLzsCZCYqSqpzRl+Ov6lEynSsW0URajyxdt2Y5HVcRDCz9PNTlftua07ZlYnQuvd1rZHvNNSs0uSPjv5LtA4fI9BGzW0yngszUQUBfudkS0evEFPOgYlKMWh4t1KetTuNffv5S3RwYBbJyQv0Jkuqoer4nn8aCMETYNMsKwNBWmKhtKE+
IhmHQKh8ANOrBAkClWkGtYfoOowjVmqfsRk0arPuLWDaZsJIGGbdStkEZ21E3O8i9OfNyFTGAgN/06YgEXCU6MVKqUERRpJ1voNSRTBSrchUx5thIxhsmvNYn05YnUNEq0Hxffeo1DGkO78RasVMnpmFZFt5904e09+NxqsTQoxR9WDasR77PFEh3HTtMplMvimtypkHfpWUrrgJC1xlQ9ETokjlXtFz7rlyfbE+XyF3FjOn6ODrPfVHfqOwvQee1anhUE0tY9DkTpNNuuF6P6M3n87L9k+r/ku2JU7pOXz5AFXVCg3S10+KuU2y4RtSjGXUXncudgwcO1f3jYscAi34PmnrCQH3Frcnhgylpzqwn+aXfRaFUcaq5oUzNFIQRyoo7Tz0KtL7/epRt/biGYcCwqSZ3pVqV48YOXZhhM6K1htOnqOKRF0WIGmNqfuoUJhopWJKJJObOnGwYXgyku1Ky/qphGDITQRQGKM00XBQEMH5gL6rl+v2UGJ9B8mD9t9V0bOT3HpV6t1wtywpN1WoNxUaVlyiKUJovyVFuGhZZKAxDpoSBAVg23dSe5yqVJyK58qhmDfhtad+JHcMsAb94+fW3F1oyFtrHZ+7tRRiG6ViOj594e6HLiFaNtn/fywt8hH0LvL/FofOWXBiGYRiGYa5QeGLHMAzDMAzTIbStKbZWCwBhwPCrLVso3L5cprB2z9fnqIFJtv9ShfxPihW9AsTyUboEIqBtK/t0v581w+R/VqnRtuVrb9LkYoJ8NWbnKN1DUgutBzBNtvXRoWXapkKZfFWuupbSGmS7dX++bDdlc5+dor7PzukL0o7i72IK8rnxW4oLK251CJWM7eY5onfU/wzDvDOS6W7Ytg2n5ZYqKJVn4j152a4Euq+bUmEJye4MfSZquXlratUc5W2/ooklkoovrqH4W5ot6aJ6yU8tJsi3z0p2a3JCrTRi6McyQkU/WbR/J637ACe76HXgkr6bPqGnkupNk1/iRz64WbZf/K+jmlxJqURRc6dk263qvzn5TB4AZCoKhml3eMWOYRiGYRimQ+CJHcMwDMMwTIfQtqbY0AgRGiZEqGc6V81/yQRVpejK6GbKk0rx6yPHaZndbrF1xCZPynZtkuSuGdDTk7z/LjKJvn6CTA6Z5f2aXF8vVZE4PUUmgnxeD/83I9p/TKn4UP8cRSrZiYJsTxVOaXInTlHIv+PQ+eezupmmWlXKlNg0lzdabKxRpKfTIDl9/t+BBScYZkkZGl0Jx4mdda/VauRuMlkkdR3L92lyfkBmSjVFVLWkV+TxBe3ftsktI7D0tCipLKUdGegtyLaY0c2UnuKyYUS072SypWKQouIioev0UKkSZDpK1QxLvxalMplf1Qov8ZZrVlT0bjLVI9vvG7tRkzv4+puyvffVCTpOsazJxRoVOnxf7zfDtCu8YscwDMMwDNMh8MSOYRiGYRimQ2hbU2wul0YyEUNg68vfpRKFfwmflvDn5vVI0DfHJ5XPkDkimdDnsqeOkKljMEHmjOXLV2py+WHKbu7MK6bOhG6yHblpA22aIJNqMpjS5ELQeZTLNW3bshSZdz0l07uR1otYj6QpIi2TJxPw/PSEJnd6clq2fYP6W/Nass2bZGNNxymq2Kvq5pxmhYrwPBUDGIa5MIRhQRjWWea+yjyZH+OKeXO+OKPJeTW6lytF+ozTcotm0mRy7e8mM2W2R3cV6c/TsUKbqvNU43r/ZlaSDnJDxVWkJco2DJTI2pZI3dBUdJxiis336JG1UUj7VKP2cznd7BszSI8V5guyLXxdj928nnRmPkPX5fHH/5cmNzVZr0pwMcXYGWYp4BU7hmEYhmGYDoEndsyS88wzz+AP/uAPMDw8DMMw8KMf/UjbLoTAV7/6VSxbtgzJZBKbNm3Ca6+9psnMzMzgE5/4BLLZLPL5PD7zmc9oK7UMwzDtAOs7ZrHhiR2z5JTLZdx0003YsWPHObd/4xvfwLe//W08+OCDeP7555FOp7F582bUlKysn/jEJ7Bv3z489dRTePzxx/HMM8/gc5/73KU6BYZhmN8K1nfMYtO2PnaluRkENQe2p1eKcAxlLqqE0NuWnjKkUiKfu+4M+Y/k0wlNrjpLPnYDw1QdYvmNv6PJ7T1OPiKHDlP7jmU9mlyhQNsG11BVChO6z4mnZDrPCz09SfE0+cQllWzny3pajhWSX4hzI/mjVFvSovznEz+R7ePH6LhWTPcPhOIzp2RIgd8y/zf9ep9qCxT+f8899+Cee+455zYhBL71rW/hK1/5Cj7ykY8AAP7pn/4Jg4OD+NGPfoSPf/zj2L9/P5588kn86le/wq233goA+M53voMPfvCD+Nu//VsMDw+ftV/XdeG65JdULBbPkmGYS0bgAQZgR572dk5RV6M5uj+vvSqvyXUpqZ8sRUeWiwVNrlYhvZhMk25Zd42uW0ZXjsi26ZC/camg7290GVXNWXeEqmRke3Q929NN6VNsW68oESm6RihqPJHWU1gFNaUajvIZpzVFDOi+7u0jv+RSRdfB5QL5Ii/vJ7/me//gbk3uR//67wAWLt0J6ztmseEVO6atOXLkCCYmJrBp0yb5Xi6Xw8aNG7F7924AwO7du5HP56WSA4BNmzbBNE08//zz59zv9u3bkcvl5N/o6OjingjDMMzbwPqOWQh4Yse0NRMT9afqwcFB7f3BwUG5bWJiAgMDA9p227bR09MjZVp54IEHMDc3J/+OHTu2CL1nGIb57WF9xywEbWuKNQ3AMoCwJdWGUMyFJmhpPDR0U+ysUq+5WFQqL7i6qWNZjsy0t/3u78r2yLrbNbn/76F/lO0hJe2I5emZ2E+88TrJXXWdbCd6r9bk0oJMzJWZ09q2ZERmVa9K5oMz87opId9PKVh6h1bJdrWU1eRM5WUYIz+N1soTvk/XxlBC+w2hh/kHQX3Y+JdxCYp4PI54PP72ggxzCbhzw81IJpK46rqbtPdPnqCUScuHyVy69po1mtxQP/3QW4Lu63kl3QcAuEoaEvX+70rr6U66usiUasXIzOu0mIqrZXLteM/1ZLJdtXaVJudHpJBFy3pCEJEeFxb1yXL0nye/RvomUsyipq3vz0goek3Z5vq+Jmdb5IoSegXZ7u/T00q997/dBgCo1lw89pOf43KE9d2VBa/YMW3N0FA919Tk5KT2/uTkpNw2NDSE06f1yXEQBJiZmZEyDMMw7Q7rO2Yh4Ikd09asXr0aQ0ND2Llzp3yvWCzi+eefx9jYGABgbGwMhUIBe/bskTJPP/00oijCxo0bL3mfGYZhLgbWd8xC0LamWEPU/8KW5XO1SLa6Ai+qLXJKoGlPL0VXDaX0yKb33LpWttffQebX2dO6CTgeUDTZVSMUMRYZekTr0ABFV6lRXJWCbsLwAtrmV/WvIQSZAl4/cVy2X9n7oiZ3x+20z94hiugtzutPc44SXNa3ikwuUUs0WeiRyTVQTNZzUwVNzp2v79D1FyYTe6lUwuHDh+XrI0eO4OWXX0ZPTw9WrFiB+++/H3/zN3+Da665BqtXr8Zf/dVfYXh4GP
feey8AYP369fjABz6Az372s3jwwQfh+z62bt2Kj3/84+eMEGOYduPd71qLdDqNd71bN8VWryeTazpHPhW61gGEobioKCbGnrS+giOUW169+6NI32OgRoAqOth1ddeTNVevkO1kjHRLtaxXAhKmouMMXd8JpVJEJKgdGrqrSKSEz3pV6kcY6WZk01bddegs56d1V5Y3j5Cf2Z3vfbdsV3w9E0OqYdo1xMJU2mF9xyw2bTuxY64cXnzxRfyu4t+4bds2AMCnPvUpPPzww/jzP/9zlMtlfO5zn0OhUMB73/tePPnkk0gkyA/on//5n7F161a8//3vh2mauO+++/Dtb3/7kp8LwzDMW8H6jllseGLHLDl33XUXhDh/IIZhGPj617+Or3/96+eV6enpwSOPPLIY3WMYhlkwWN8xiw372DEMwzAMw3QIbbtiFwUhIstE1dV9P2JKqhHbJl8Sy9R92K4eopQhiSTNX1et1BMz3vReWhJftu5G2X5590Oa3IpR2t/Qu26g/vTraQfsVE62KzXy06sWdb+NyZPk3zE7eVzbFiopCZIZWn7v69MrRRw7+ZJsDy5bLttBpSVFTJUyjhvlWTqO0P1lVF+XZJyOFRvSj1uM131Nat7C+JwwzJVOIp1GMp1GV0JPSZFOKSrappROUcuCj6H62CntqKWqTeRHyjbaidHibxsoXnxqViRh6HJdeUrBEoT0mTDS008hop0I6L65pnqAkNqhresdAeWkAyU1U6TvL64c2wmpv+ma3icxSfpv6g2KQh1ZN6LJnTEb+tS8fNM7MVcWvGLHMAzDMAzTIfDEjmEYhmEYpkNoW1OsY9lwLBuzLdUWwhot1SdTSuHrlmXyASXFybFTBdle854PaHIjN6ivydzqz5c1uVyGTKz9a2+W7bKtF8/e99KvZNut0j6KLcW4z5wYp76Huhk5kaCvZflqMrHeuFavXhFYFObvWHlqx1oyrNeo2kTlTcpkHwUtFSWUaX7JIrNFqldPJzA4XE+tUq0tTLoThrnS6cp2I9PVBWHp5seKknZIKEXc3ZYKOuUS6RpPqSDjurouCAIyl/pKGhO16gwAVCqkdytlciMJWtKiZHpIL2ZyednOZ/o0uUQsJtthS/UKGEoVCaWaUEZxQwGA6dP0uZpSkShSKvUAgAE6VhTSNctmdDP3yhVUtqtaoesnIj0lVi5T13+O1WJeZpg2hVfsGIZhGIZhOgSe2DEMwzAMw3QIPLFjGIZhGIbpENrWx86ruTCjEKm43kUjoYSym+QLIULdLyLZRXIf/qMPy/Yd97xfk8v2kZ/F5Bv7Zdsy9f0V5qlEztTRg7J9cl73M/uPH/1ItruS5C9Tc/UUJEOD5JuSzeg+bEeOUyoUT+lHz/AqTW7tDbfQi5D8R2YKevqUiuKXOFul/RlCv7a1KvnPlJRUCKJU0+TW5xvyLa4yDMNcHP/6xFNIJBIInV9o78/OUhqO0twZ2W7NvKH63KkF5MOWvCg9/QOy3d1HZQjjlq4LyjMF2T70GunFYknXY6OrV8q25ZC+y2Z6NbnVq6n02MioXuZs9VXkR9wTJ12VSej+hpFSUg2Kv5vfovstpdakpexvcFWL31+WdKYvSI9bMU0MPT3148bjen8Ypl25oBW77du347bbbkMmk8HAwADuvfdeHDx4UJOp1WrYsmULent70dXVhfvuu09TNAzDMJcDrO8YhrkcuaCJ3a5du7BlyxY899xzeOqpp+D7Pu6++26UyxRR9KUvfQk//elP8eijj2LXrl04efIkPvrRjy54xxmGYRYT1ncMw1yOXJAp9sknn9ReP/zwwxgYGMCePXvwvve9D3Nzc/iHf/gHPPLII/i93/s9AMBDDz2E9evX47nnnsPtt9/+Wx8rEl49a3pLVnFDCdcPBIXrG4ZuckjEadn+5lvIZBl39OX0V1+m6g2zJ1+XbdfVzY/zszOyfezwq7JdEklNzgnpc11KpvhsQje39neTKfbU5IS2LVDSEFTmyfRx7Mg4dPZRP0qUkiBh69ciiJP5ZTqg65JM6ukEUhk6l6RNZor5SlHfXyMdQBBxuhOmc7mU+u7nv3getu0gP7JOe1+EdP+/9Mufy/bKEb06Ql8vmT5PHCd90nqPpnrysu2ZpEsnFfcPAHj/hjHZvvnGd8l2pUUvmg79hBwZf1O2D732uib3yl7Ss/lcl7btvo/9oWzf+a61sh0T+rrDyDKqGuQppljD1CvgqBU1fKXKhWm3VKjIk/5LKpU3Ikv3MWn+Ytht67jEMDrvKHhibq7ud9bTU8/ltmfPHvi+j02bNkmZa6+9FitWrMDu3bvPuQ/XdVEsFrU/hmGYdoP1HcMwlwMXPbGLogj3338/7rzzTlx//fUAgImJCcRiMeTzeU12cHAQExMT59hL3Y8ll8vJv9HR0XPKMQzDLBWs7xiGuVy46MXlLVu2YO/evXj22WffUQceeOABbNu2Tb4uFosNZRcBiBAF+rK47VBFiVCpnOBBj4wazFE28n/7yeOy3TO4T5MbUJf3KxT56jh6lvKuNJkwbZPMAOkW0+7QAJlEqvOzsp209P1NT1GEm+/pJoJMgkyinhKF9tpLL2pypw4ckm03oILWcPQM6aHa3xHFJJzWr60ZJzNLQsm+3g3d3Lz+XasBAJWqD+C/wDCdzmLru3s/9r8jmUwhPnCNJl+Zpwnia6/QvbZsSJ8QmoopMZkgXeVFVU1u7fW0/+5l5KJR6dOrN/z+PbQKqbpolFtMsZFiBQ0EmXZrgS53+jS5srx55KS2LZWi/k4cn5bto/te0+RMpYLOGxOnZXvD3bdqcitXDcu2GjFrJlrCXR3Su4ZabcLQ9XHMqJ9XzGkJRWaYNuWiJnZbt27F448/jmeeeQYjiq/H0NAQPM9DoVDQnmInJycxNDR0jj0B8Xgc8Xj8nNsYhmGWGtZ3DMNcTlyQKVYIga1bt+Kxxx7D008/jdWrV2vbb7nlFjiOg507d8r3Dh48iPHxcYyNjbXujmEYpm1hfccwzOXIBa3YbdmyBY888gh+/OMfI5PJSD+SXC6HZDKJXC6Hz3zmM9i2bRt6enqQzWbxxS9+EWNjYxcUIcYwDLPUsL5jGOZy5IImdt/73vcAAHfddZf2/kMPPYRPf/rTAIBvfvObME0T9913H1zXxebNm/Hd7373gjsWRQaiyEDM1v3FEjb5cUAJcxeWnk4k8ihlyJkz5KdSmtKdmpM+RaVFoGP1dOuZ0/PD/bIdhK5snzip70+A/DBMky6vF7RkRzfINy+dSGnblIwusNQXLSldQo98Ak3F2aVYmdXkvDj52WSGqe/lZEGTm4/I565WpsXc3uxVmlxfw4+wXObSE0zncin1XdwxEY+ZOHRgr/Z+cY70i1DTeHj6vVcqUW49wyBdkGipluBXKC3S3BTtb3JcT3fys3/7mWzPziufKc1pcpks+cfluntkO53Vzc3Hj5Nf3UDfcm1bIku+fr/4VzruzGu/0eRCRacfnqAk0MfL85rcNevJjzCXJd2aU1JMAUAyRelOcmm6Tk5C/81Jpern4qm6m
GHamAua2KmK5XwkEgns2LEDO3bsuOhOMQzDLDWs7xiGuRx5R3nsGIZhGIZhmPahbXNpm0YcpmEjEddTbQglrUk6Scvs6Yxe4LniU2h8b4bC3O2WtCjeHC3pRybJVRx92X1wkBynI8UMsu5GPQP8L39OjtSeqMi2Y+jZ0asl2pbNZLVtMSXFuWVQP0o1PYXAkVNkci0U6Lxco6zJ9a+l+fvyvJJKRejh/7NnqE+xmmIqXq6bpauVejqAapUrTzDMQjA/M4mgmsTTP/5X7f1jE8dl2/TJpeI3v2lJbKzol0B1+zB0PfbU40/LdkxJ6XTzu9+jyXmxjGwXXdILb4yf1uSmp/fTZ2p0rJMTRzW5I0dJ7tZ336Jt+z+2UPqXF56jxM7B3LQmV3TJjaSquLy88aJuRv7FnlOynbbJfOvEdBOrpUQnZxRT7MjKVZrcR+77OACgUuF0J8zlAa/YMQzDMAzDdAg8sWMYhmEYhukQ2tYU69gGYraJirL8DgBWgqJfI6WaQ8XXM6xbSpbweIzMj46jR8/GUhQplcvStompSU2uspxMrgOjV8v2idNnNLl33XanbJemKBLsjUN6xYtyqSDbtqX3PZcj06wBMm+cOqFnbB9/U4mKjVPfs4N6lG1/j7I/xZxrzOjXonuWhsPyAYpwG8nr5ubDr9Yj9ao1HwzDvHOGBgaRSqVxzSo9V55Q7n/bpLbV4tphWvSMLiLSfbGEfo/DoUjQ4WGKTr1r82ZNLJNSokkTVJXi1b16pZlDh1+nc1i+SrZrQl8zsBS3mb2HDmjbXj1EFXRSq9bL9smTejWM7jy9HoiRG0mqS3fXmZl4U7anTxyW7akzuk6vhUqUsZJV4FRB/1m84/31bdWqfs0Zpl3hFTuGYRiGYZgOgSd2DMMwDMMwHQJP7BiGYRiGYTqEtvWxG+g1kUqY8Kf1kPdqSH4mZSWrhzD11Bu2kjIkm6V0HTFHz8ReLVPagKSjXA5PvzQv/vKXsn3VOiXr+XG98oSpVMNIKVnfLUvPxJ5Mku9LuaT72FWr9DoIKLVKV1Lfxx3vXivbCSVlSmDpKV1Cn9IVVI+Rj505n9DkBlKU4uDda99F7+cHNbk9p44AAGqefhyGYS6O2TOzqCVd3L7xDu39O37nd2Q7Hqd0HbalP5ObJr2OhOKLBz3Fh++Rnqx6pBemjx/R5GYU/9mZMzOy/YbiUwcAJ0+T/usaGKYNcV23GDHysfMC3W/6qV3PyvbKNTfI9mhPS4UKpZJPSknV4tb0yhNvFMmfuUvRi6HQ9dXEbEm2+/pWyXbF11PEPL3rBQCA73OlHebygFfsGIZhGIZhOgSe2DEMwzAMw3QIbWuKHRmJoSvpIGfoS/qHj5H5YFIpYu2Fupmyq4tOrVyhtCBhVNLkLGVuOzNFZt/5kr5sX/NpH5agdqZLD8mfnCCzxfEymT0joYfKD/aTediI9LQhswWqKBFP03nlcxlNLqaYY1zFxAJbNzeXXZLzSkpFiUif1189OiTbw0PUv2PH9TQB01P178D1ufIEwywEqVQcqWQc00W9usxLv9kj2wMDpGsGB/RKO75POmR2tkAbWqrV2IquWb6aTKej3bpuOXGIqjeUS2Q6HRgc0uRSvXnZthJk9qxU9eMuW7ZCtidOHte2nZkmfbpsmPxrjJZavSVX0ZM26UU/0vVQXHFziStpYbzpKU0OJunCQSVVi+fqJtdmN36L0sEM0xbwih3DMAzDMEyHwBM7hmEYhmGYDqFtTbHZvIOulIPqVEV7v3tAifJKU6TVmUk90qrm0XK6HSMTgdcS2BQp5kQ/pH3MVWc1ubQSkVqrkJmhWtMrT3jK/kKlLYQenVYq0nlls3rm9GyWqmFUqyR3ZlrvU1cXmRwMJSrOCHSbQcym/avBarGWotirrl5Fx1UKXj/zzKua3G8O1QuBB6EePcYwzMURtyPEnQhuraC9/8tf7pRt4ZPeyaZ0neH75DpSU6Lq7ZZn95WrRmX7+tuvk+01K4Y1ucIxMpdOzJKOi7VE5q/pJdPs1BS5udyw7npN7l03rJPtH/y//6Rts0FVJHzFfcXzdHOuCBSTa4LO14rrfVq1+irZPn3sIG0wdX2XVNxc1q+nDAO1iu6uM7psAADgunp/GKZd4RU7hmEYhmGYDoEndsySsn37dtx2223IZDIYGBjAvffei4MHD2oytVoNW7ZsQW9vL7q6unDfffdhclIP6BgfH8eHPvQhpFIpDAwM4Mtf/jKCgPPsMQzTPrC+Yy4FPLFjlpRdu3Zhy5YteO655/DUU0/B933cfffdKCvZp7/0pS/hpz/9KR599FHs2rULJ0+exEc/+lG5PQxDfOhDH4LnefjlL3+J73//+3j44Yfx1a9+dSlOiWEY5pywvmMuBYYQ7RXEXSwWkcvl8NK//HdkUg6mjz6lbZ9TUnQUq+QiWJjW/b2Ks4r7YNgvm+mEHq4fqpUs3IJsz1f0ihcpxU+tK0U+cK7Qj1tRqkj4Lm0zhD6HTscpDL+rS0/pYivpSvyQQvxPTbZUuVAqW+Ty5Edox2K6nJKl/UyZ+jdf1DO2f2DTrbRNUTT/1//9E01usuH2F0UCb87WMDc3h2w2i4VgamoKAwMD2LVrF973vvdhbm4O/f39eOSRR/Cxj30MAHDgwAGsX78eu3fvxu23346f/exn+P3f/32cPHkSg4P1KhkPPvgg/uIv/gJTU1OItVyPc9Ecd8zlw0KOu6WiOe7+n29/D6lkEpMF3Y/r5Bnyb4s8uictX1+diRQ9JizyJbNsfewnFL/kZauXyXYaesqQGSXF0d7j5Nv7y+ee1eSmpyiFyFWryY/utjv0ChppRcf97Kc/1rYJn36ChpS0I6alu4BHIZ1zTKkSZMf09E7r1pGP3dEDL9NxQr3Czwt7XpLtG9+zUbarakkjAMMD9d8Pz/fwg//5A9Z3zJJwIeOOV+yYtmJurp7TqqenBwCwZ88e+L6PTZs2SZlrr70WK1aswO7duwEAu3fvxg033CCVHABs3rwZxWIR+/btw7lwXRfFYlH7YxiGuZSwvmMWA57YMW1DFEW4//77ceedd+L66+tRdRMTE4jFYsjn85rs4OAgJiYmpIyq5Jrbm9vOxfbt25HL5eTf6OjoOeUYhmEWA9Z3zGLRtulOyiUbRuQAVpf2fleaTBVOkpbw0y1Fp3M5Mk2UilWlrTuhlipKupMatTOxXk0u4dByf+BSWhTb1ufGMeWloxTtNgxdLqVUxjBbvoVANTkkaWM2n9LkZmbIlDqvmISzPXrfKwHleHntKJmYD7xyTJMb7KFl3sER5Vimbm7ua1TACKMIb84uXAqALVu2YO/evXj22WffXvgd8sADD2Dbtm3ydbFYZGXHLBnptINUKoZci2NMpp/ScLiK3km0PJPHDDK/iaSS3iilm+WiGqXymJ+nVRsrpZt4BtbkZXtNiszBrx15Xe+gQTrOSZHLx4lT45pYb1/3OdsA4FXJ9Om6VIWiXNZ1i6ukIfFdSgNlJ3S9ODhMrjdvniJ9Pzmu971WomO9vu9l6l9vvyYnuuuracJf
2PROrO+YxaJtJ3bMlcXWrVvx+OOP45lnnsHIyIh8f2hoCJ7noVAoaE+xk5OTGBoakjIvvPCCtr9mFFlTppV4PI54S/4rhmGYSwHrO2YxYVMss6QIIbB161Y89thjePrpp7F69Wpt+y233ALHcbBzJyVqPXjwIMbHxzE2NgYAGBsbwyuvvILTp09LmaeeegrZbBbXXXcdGIZh2gHWd8yloG1X7E4eA1IJwC3oJtZMP5kpE0mKGM3pFlv09NCplcq0bF8o6JUsZqdjSpvetyI9S3mkBA+HoRJB1lKAWp0pGyZFvlq2fqmrIUmKlvRDjlKoO6jM0HGret9DJXq2UKJtnt4lzCim6KOH6SQL03r0l1emDw7l6Mlv/crlmlxzd34Y4ddHZ/BO2LJlCx555BH8+Mc/RiaTkT4iuVwOyWQSuVwOn/nMZ7Bt2zb09PQgm83ii1/8IsbGxnD77bcDAO6++25cd911+OM//mN84xvfwMTEBL7yla9gy5Yt/JTKXBZUSoeBMAFE+rO2Y5Bim5wk0+Frrx7V5BJK1H4sl5ftvgHd7DncR5GQtlKtpjenu2+oRWVqShWegQHdZLt8uEe2Tyn+XYcO7dfkVnk0gVFNygAwP0/nVamQ6bQ4pzv4q6bY0COdZsXTmty+vX2y7bnkhjIwoPulLb+RqmMM9NO2vn591SvR2H9tASpPsL5jLgVtO7Fjrgy+973vAQDuuusu7f2HHnoIn/70pwEA3/zmN2GaJu677z64rovNmzfju9/9rpS1LAuPP/44vvCFL2BsbAzpdBqf+tSn8PWvf/1SnQbDMMzbwvqOuRTwxI5ZUn6bNIqJRAI7duzAjh07ziuzcuVKPPHEEwvZNYZhmAWF9R1zKWAfO4ZhGIZhmA6hbVfsQqcXoROHH7tVe9+NyD/DDCgMP5EzNLl8P/nmdZvkxNZT0UPWCzPkm1I4Q3511bJ+acJASRugVJGIAn1/tSr5YagZwC1b99mbr9HnqiXdd8MR5BeSMTN0LFP3OfF96mM8TU+CCUf3s8jHaH9XIS/bN9yk+6asu/Em2V519dWyveF23bfv+Mm6r4vrBcCvj4JhmHeG8FxEFmC2PGvbPumNrEM6Y89zuzS5iUnShYZy/2/YcIsm994x0qfN5LgA8JtfP6/JlWukkw6NU1qkN44e1eSqFdINQpAOTmT1lCFFpcrN/OwZbVu5SD58qha3LV2n5zKU1mRYCTro7l2myQ0Mk4/c8LtvkO2erK7vYmqFDqWtpnABIPW9WhGIYdoZXrFjGIZhGIbpENpuxa7pg1Cp1VeZqjVP2244FDEaRbQSZ1b0pzu7THIwKdqzXNVX2MpVkquoq2g13Rci0iJX32LFzqX9hcoTrBXqoapVl/Zf83xtmxD02lZWG2ueHj7rqi8N2p8l9CdOV6kr6QXUD6el3mRFudYlJTlo1dX75zb60dxvm5Ubvig64RyuNDrhO2ueQ7VWt0T4Lc/agXIv12pkrQgjXe+oUfuGkqzcD/R7vKZEpLpKxKjr6XrWU3RSoOwjajmuUF6rK3ZRS7aASKlFK1r3cZ7vsfVt9dhqZoKg5Rx9Xzkv5XxrbkumA/PCVuyaUbGdNO6Yy4cL+c4M0Wbf8PHjxzkj9mXGsWPHtCSblyNvvPEG1qxZs9TdYC6AThh3rO8uP3jcMUvBhYy7tpvYRVGEkydPQgiBFStW4NixY8hms2//wQ6mWf6l3a6FEALz8/MYHh6GaV7eVv1CoYDu7m6Mj48jl8u9/QeYBeVCxngnjTvWd2fD+m7xiaIIBw8exHXXXdd21/lKYLH1XduZYk3TxMjICIrFeqBANpvlQdegHa9Fp0yCmjdMLpdru2t8JfHbjvFOGnes785NO16LThp3y5fXE8+343W+UlgsfXd5P3YwDMMwDMMwEp7YMQzDMAzDdAhtO7GLx+P42te+xrXvwNfiUsDXeGm50q//lX7+KnwtLg18nZeOxb72bRc8wTAMwzAMw1wcbbtixzAMwzAMw1wYPLFjGIZhGIbpEHhixzAMwzAM0yHwxI5hGIZhGKZD4IkdwzAMwzBMh9CWE7sdO3Zg1apVSCQS2LhxI1544YWl7tKis337dtx2223IZDIYGBjAvffei4MHD2oytVoNW7ZsQW9vL7q6unDfffdhcnJyiXrcWVyJY26xWagxPT4+jg996ENIpVIYGBjAl7/85bMKv1/OXIljj/Xd0nIljrnFpq30nWgzfvCDH4hYLCb+8R//Uezbt0989rOfFfl8XkxOTi511xaVzZs3i4ceekjs3btXvPzyy+KDH/ygWLFihSiVSlLm85//vBgdHRU7d+4UL774orj99tvFHXfcsYS97gyu1DG32CzEmA6CQFx//fVi06ZN4qWXXhJPPPGE6OvrEw888MBSnNKCc6WOPdZ3S8eVOuYWm3bSd203sduwYYPYsmWLfB2GoRgeHhbbt29fwl5dek6fPi0AiF27dgkhhCgUCsJxHPHoo49Kmf379wsAYvfu3UvVzY6Ax9yl4WLG9BNPPCFM0xQTExNS5nvf+57IZrPCdd1LewKLAI+9OqzvLh085i4NS6nv2soU63ke9uzZg02bNsn3TNPEpk2bsHv37iXs2aVnbm4OANDT0wMA2LNnD3zf167NtddeixUrVlxx12Yh4TF36biYMb17927ccMMNGBwclDKbN29GsVjEvn37LmHvFx4eewTru0sDj7lLx1Lqu7aa2J05cwZhGGonBQCDg4OYmJhYol5deqIowv33348777wT119/PQBgYmICsVgM+Xxek73Srs1Cw2Pu0nCxY3piYuKc301z2+UMj706rO8uHTzmLg1Lre/sd9B3ZpHYsmUL9u7di2effXapu8IwCwKPaeZ88NhgOo2lHtNttWLX19cHy7LOihKZnJzE0NDQEvXq0rJ161Y8/vjj+PnPf46RkRH5/tDQEDzPQ6FQ0OSvpGuzGPCYW3zeyZgeGho653fT3HY5w2OP9d2lhsfc4tMO+q6tJnaxWAy33HILdu7cKd+Logg7d+7E2NjYEvZs8RFCYOvWrXjsscfw9NNPY/Xq1dr2W265BY7jaNfm4MGDGB8f7/hrs5hcyWNusVmIMT02NoZXXnkFp0+fljJPPfUUstksrrvuuktzIovElTz2WN8tDVfymFts2krfLUDwx4Lygx/8QMTjcfHwww+LV199VXzuc58T+XxeixLpRL7whS+IXC4n/uM//kOcOnVK/lUqFSnz+c9/XqxYsUI8/fTT4sUXXxRjY2NibGxsCXvdGVypY26xWYgx3Qz/v/vuu8XLL78snnzySdHf399R6U6uxLHH+m7puFLH3GLTTvqu7SZ2Qgjxne98R6xYsULEYjGxYcMG8dxzzy11lxYdAOf8e+ihh6RMtVoVf/qnfyq6u7tFKpUSf/iHfyhOnTq1dJ3uIK7EMbfYLNSYPnr0qLjnnntEMpkUfX194s/+7M+E7/uX+GwWjytx7LG+W1quxDG32LSTvjMaHWIYhmEYhmEuc9rKx45hGIZhGIa5eHhixzAMwzAM0yHwxI5hGIZ
hGKZD4IkdwzAMwzBMh8ATO4ZhGIZhmA6BJ3YMwzAMwzAdAk/sGIZhGIZhOgSe2DEMwzAMw3QIPLFjGIZhGIbpEHhixzAMwzAM0yHwxI5hGIZhGKZD+P8BVwmQrFY6sosAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# We can also support different types of ablations. For example, we can use block or column ablations.\n", + "\n", + "(x_train, y_train), (x_test, y_test) = get_cifar_data()\n", + "for ablation_type in ['block', 'row']:\n", + " art_model = PyTorchDeRandomizedSmoothing(model='vit_small_patch16_224', # Name of the model acitecture to load\n", + " loss=torch.nn.CrossEntropyLoss(), # loss function to use\n", + " optimizer=torch.optim.SGD, # the optimizer to use: note! this is not initialised here we just supply the class!\n", + " optimizer_params={\"lr\": 0.01}, # the parameters to use\n", + " input_shape=(3, 32, 32), # the input shape of the data: Note! that if this is a different shape to what the ViT expects it will be re-scaled\n", + " nb_classes=10,\n", + " verbose=False,\n", + " ablation_type=ablation_type,\n", + " ablation_size=4, # Size of the retained column\n", + " replace_last_layer=True, # Replace the last layer with a new set of weights to fine tune on new data\n", + " load_pretrained=True) # if to load pre-trained weights for the ViT\n", + " \n", + " # We can see behind the scenes how PyTorchDeRandomizedSmoothing processes input by passing in the first few CIFAR\n", + " # images into art_model.ablator.forward along with a start position to retain pixels from the original image.\n", + " original_image = np.moveaxis(x_train, [1], [3])\n", + "\n", + " ablated = art_model.ablator.forward(torch.from_numpy(x_train[0:10]).to(device), column_pos=6)\n", + " ablated = ablated.cpu().detach().numpy()\n", + "\n", + " # Note the shape:\n", + " # - The ablator adds an extra channel to signify the ablated regions of the input.\n", + " # - The input is reshaped to be 224 x 224 to match the image shape that the ViT is expecting\n", + " print(f\"The shape of the ablated image is {ablated.shape}\")\n", + "\n", + " ablated_image = ablated[:, 0:3, :, :]\n", + " \n", + " # shift the axis to disply\n", + " ablated_image = np.moveaxis(ablated_image, [1], [3])\n", + "\n", + " # plot the figure: Note the axis scale!\n", + " f, axarr = plt.subplots(1,4)\n", + " axarr[0].imshow(original_image[0])\n", + " axarr[1].imshow(ablated_image[0])\n", + " axarr[2].imshow(original_image[1])\n", + " axarr[3].imshow(ablated_image[1])\n", + " plt.tight_layout()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6ddf5329", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Running algorithm: levine2020\n", + "INFO:art.estimators.classification.pytorch:Inferred 6 hidden layers on PyTorch classifier.\n", + "INFO:art.estimators.certification.derandomized_smoothing.pytorch:MNISTModel(\n", + " (conv_1): Conv2d(2, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (conv_2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n", + " (fc1): Linear(in_features=6272, out_features=500, bias=True)\n", + " (fc2): Linear(in_features=500, out_features=100, bias=True)\n", + " (fc3): Linear(in_features=100, out_features=10, bias=True)\n", + " (relu): ReLU()\n", + ")\n", + "Normal Acc 0.965 Cert Acc 0.494: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████| 79/79 [00:02<00:00, 33.61it/s]\n" + ] + } + ], + "source": [ + "# The algorithm is general such that we do not have to supply only ViTs. 
\n", + "# We can use regular CNNs as well, howevever we will loose the advantages \n", + "# that were discussed at the start of the notebook. Here we will demonstrate it for a simple MNIST case \n", + "# and also illustrate the use of the algorithm in https://arxiv.org/pdf/2002.10733.pdf\n", + "\n", + "class MNISTModel(torch.nn.Module):\n", + "\n", + " def __init__(self):\n", + " super(MNISTModel, self).__init__()\n", + "\n", + " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", + "\n", + " self.conv_1 = torch.nn.Conv2d(in_channels=2, # input channels are doubled as per https://arxiv.org/pdf/2002.10733.pdf\n", + " out_channels=64,\n", + " kernel_size=4,\n", + " stride=2,\n", + " padding=1)\n", + "\n", + " self.conv_2 = torch.nn.Conv2d(in_channels=64,\n", + " out_channels=128,\n", + " kernel_size=4,\n", + " stride=2, padding=1)\n", + "\n", + " self.fc1 = torch.nn.Linear(in_features=128*7*7, out_features=500)\n", + " self.fc2 = torch.nn.Linear(in_features=500, out_features=100)\n", + " self.fc3 = torch.nn.Linear(in_features=100, out_features=10)\n", + "\n", + " self.relu = torch.nn.ReLU()\n", + "\n", + " def forward(self, x: \"torch.Tensor\") -> \"torch.Tensor\":\n", + " \"\"\"\n", + " Computes the forward pass though the neural network\n", + " :param x: input data of shape (batch size, N features)\n", + " :return: model prediction\n", + " \"\"\"\n", + " x = self.relu(self.conv_1(x))\n", + " x = self.relu(self.conv_2(x))\n", + " x = torch.flatten(x, 1)\n", + " x = self.relu(self.fc1(x))\n", + " x = self.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + "\n", + "def get_mnist_data():\n", + " \"\"\"\n", + " Get the MNIST data.\n", + " \"\"\"\n", + " train_set = datasets.MNIST('./data', train=True, download=True)\n", + " test_set = datasets.MNIST('./data', train=False, download=True)\n", + "\n", + " x_train = train_set.data.numpy().astype(np.float32)\n", + " y_train = train_set.targets.numpy()\n", + "\n", + " x_test = test_set.data.numpy().astype(np.float32)\n", + " y_test = test_set.targets.numpy()\n", + "\n", + " x_train = np.expand_dims(x_train, axis=1)\n", + " x_test = np.expand_dims(x_test, axis=1)\n", + "\n", + " x_train = x_train / 255.0\n", + " x_test = x_test / 255.0\n", + "\n", + " return (x_train, y_train), (x_test, y_test)\n", + "\n", + "\n", + "model = MNISTModel()\n", + "(x_train, y_train), (x_test, y_test) = get_mnist_data()\n", + "optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)\n", + "\n", + "art_model = PyTorchDeRandomizedSmoothing(model=model,\n", + " loss=torch.nn.CrossEntropyLoss(),\n", + " optimizer=optimizer,\n", + " input_shape=(1, 28, 28),\n", + " nb_classes=10,\n", + " ablation_type='column',\n", + " algorithm='levine2020', # Algorithm selection\n", + " threshold=0.3, # Requires a threshold\n", + " ablation_size=2,\n", + " logits=True)\n", + "\n", + "scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[200], gamma=0.1)\n", + "\n", + "# Uncomment to train.\n", + "'''\n", + "art_model.fit(x_train, y_train,\n", + " nb_epochs=400,\n", + " scheduler=scheduler)\n", + "torch.save(art_model.model.state_dict(), 'trained_mnist.pt')\n", + "\n", + "'''\n", + "art_model.model.load_state_dict(torch.load('trained_mnist.pt'))\n", + "acc, cert_acc = art_model.eval_and_certify(x_test, y_test, size_to_certify=5)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/requirements_test.txt b/requirements_test.txt index 304998b81c..1e9ad346f9 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -35,6 +35,9 @@ torch==1.13.1 torchaudio==0.13.1+cpu torchvision==0.14.1+cpu +# PyTorch image transformers +timm==0.9.2 + catboost==1.1.1 GPy==1.10.0 lightgbm==3.3.5 diff --git a/setup.py b/setup.py index 064087b435..fdf9af2298 100644 --- a/setup.py +++ b/setup.py @@ -112,8 +112,9 @@ def get_version(rel_path): "requests", "sortedcontainers", "numba", + "timm", "multiprocess", - ], + ] }, classifiers=[ "Development Status :: 3 - Alpha", diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index 1c93dfec9e..bcae2c4844 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -118,10 +118,10 @@ def forward(self, x): for dataset, dataset_name in zip([fix_get_mnist_data, fix_get_cifar10_data], ["mnist", "cifar"]): if dataset_name == "mnist": ptc = SmallMNISTModel().to(device) - input_shape = (2, 28, 28) + input_shape = (1, 28, 28) else: ptc = SmallCIFARModel().to(device) - input_shape = (6, 32, 32) + input_shape = (3, 32, 32) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(ptc.parameters(), lr=0.01, momentum=0.9) @@ -137,6 +137,7 @@ def forward(self, x): ablation_type=ablation_type, ablation_size=5, threshold=0.3, + algorithm="levine2020", logits=True, ) classifier.fit(x=dataset[0], y=dataset[1], nb_epochs=1) @@ -152,7 +153,7 @@ def test_tf2_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data): import tensorflow as tf def build_model(input_shape): - img_inputs = tf.keras.Input(shape=input_shape) + img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) # tensorflow uses channels last and we are loading weights from an originally trained pytorch model @@ -167,9 +168,9 @@ def build_model(input_shape): for dataset, dataset_name in zip([fix_get_mnist_data, fix_get_cifar10_data], ["mnist", "cifar"]): if dataset_name == "mnist": - input_shape = (28, 28, 2) + input_shape = (28, 28, 1) else: - input_shape = (32, 32, 6) + input_shape = (32, 32, 3) net = build_model(input_shape=input_shape) try: @@ -226,7 +227,6 @@ def forward(self, x): return self.fc2(x) def load_weights(self): - fpath = os.path.join( os.path.dirname(os.path.dirname(__file__)), "../../utils/resources/models/certification/derandomized/" ) @@ -262,21 +262,23 @@ def load_weights(self): clip_values=(0, 1), loss=criterion, optimizer=optimizer, - input_shape=(2, 28, 28), + input_shape=(1, 28, 28), nb_classes=10, ablation_type=ablation_type, ablation_size=ablation_size, threshold=0.3, + algorithm="levine2020", logits=True, ) preds = classifier.predict(np.copy(fix_get_mnist_data[0])) - num_certified = classifier.ablator.certify(preds, size_to_certify=size_to_certify) - + cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify( + preds, label=fix_get_mnist_data[1], 
size_to_certify=size_to_certify + ) if ablation_type == "column": - assert np.sum(num_certified) == 52 + assert np.sum(cert.cpu().numpy()) == 52 else: - assert np.sum(num_certified) == 22 + assert np.sum(cert.cpu().numpy()) == 22 except ARTTestException as e: art_warning(e) @@ -290,7 +292,7 @@ def test_tf2_mnist_certification(art_warning, fix_get_mnist_data): import tensorflow as tf def build_model(input_shape): - img_inputs = tf.keras.Input(shape=input_shape) + img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) # tensorflow uses channels last and we are loading weights from an originally trained pytorch model @@ -322,7 +324,7 @@ def get_weights(): weight_list.append(w) return weight_list - net = build_model(input_shape=(28, 28, 2)) + net = build_model(input_shape=(28, 28, 1)) net.set_weights(get_weights()) loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) @@ -346,7 +348,7 @@ def get_weights(): clip_values=(0, 1), loss_object=loss_object, optimizer=optimizer, - input_shape=(28, 28, 2), + input_shape=(28, 28, 1), nb_classes=10, ablation_type=ablation_type, ablation_size=ablation_size, @@ -358,12 +360,14 @@ def get_weights(): x = np.squeeze(x) x = np.expand_dims(x, axis=-1) preds = classifier.predict(x) - num_certified = classifier.ablator.certify(preds, size_to_certify=size_to_certify) + cert, cert_and_correct, top_predicted_class_argmax = classifier.ablator.certify( + preds, label=fix_get_mnist_data[1], size_to_certify=size_to_certify + ) if ablation_type == "column": - assert np.sum(num_certified) == 52 + assert np.sum(cert) == 52 else: - assert np.sum(num_certified) == 22 + assert np.sum(cert) == 22 except ARTTestException as e: art_warning(e) diff --git a/tests/estimators/certification/test_vision_transformers.py b/tests/estimators/certification/test_vision_transformers.py new file mode 100644 index 0000000000..9a42b8eb97 --- /dev/null +++ b/tests/estimators/certification/test_vision_transformers.py @@ -0,0 +1,616 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+import pytest +import os + +import numpy as np + +from art.utils import load_dataset +from tests.utils import ARTTestException + + +@pytest.fixture() +def fix_get_mnist_data(): + """ + Get the first 128 samples of the mnist test set with channels first format + + :return: First 128 sample/label pairs of the MNIST test dataset. + """ + nb_test = 128 + + (_, _), (x_test, y_test), _, _ = load_dataset("mnist") + x_test = np.squeeze(x_test).astype(np.float32) + x_test = np.expand_dims(x_test, axis=1) + y_test = np.argmax(y_test, axis=1) + + x_test, y_test = x_test[:nb_test], y_test[:nb_test] + return x_test, y_test + + +@pytest.fixture() +def fix_get_cifar10_data(): + """ + Get the first 128 samples of the cifar10 test set + + :return: First 128 sample/label pairs of the cifar10 test dataset. + """ + nb_test = 128 + + (_, _), (x_test, y_test), _, _ = load_dataset("cifar10") + y_test = np.argmax(y_test, axis=1) + x_test, y_test = x_test[:nb_test], y_test[:nb_test] + x_test = np.transpose(x_test, (0, 3, 1, 2)) # return in channels first format + return x_test.astype(np.float32), y_test + + +@pytest.mark.only_with_platform("pytorch") +def test_ablation(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the ablation is being performed correctly + """ + import torch + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch + + try: + cifar_data = fix_get_cifar10_data[0] + + col_ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=False, # do not upsample initially + mode="ViT", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + cifar_data = torch.from_numpy(cifar_data).to(device) + # check that the ablation functioned when in the middle of the image + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 0:10]) == 0 + assert torch.sum(ablated[:, :, :, 10:14]) > 0 + assert torch.sum(ablated[:, :, :, 14:]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 30:]) > 0 + assert torch.sum(ablated[:, :, :, 2:30]) == 0 + assert torch.sum(ablated[:, :, :, :2]) > 0 + + # check that upsampling works as expected + col_ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode="ViT", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, : 10 * 7]) == 0 + assert torch.sum(ablated[:, :, :, 10 * 7 : 14 * 7]) > 0 + assert torch.sum(ablated[:, :, :, 14 * 7 :]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, :, 30 * 7 :]) > 0 + assert torch.sum(ablated[:, :, :, 2 * 7 : 30 * 7]) == 0 + assert torch.sum(ablated[:, :, :, : 2 * 7]) > 0 + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") +def test_ablation_row(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the ablation is being performed correctly + """ + import torch + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + from 
art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch + + try: + cifar_data = fix_get_cifar10_data[0] + + col_ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=False, # do not upsample initially + mode="ViT", + ablation_mode="row", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + cifar_data = torch.from_numpy(cifar_data).to(device) + # check that the ablation functioned when in the middle of the image + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 0:10, :]) == 0 + assert torch.sum(ablated[:, :, 10:14, :]) > 0 + assert torch.sum(ablated[:, :, 14:, :]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 30:, :]) > 0 + assert torch.sum(ablated[:, :, 2:30, :]) == 0 + assert torch.sum(ablated[:, :, :2, :]) > 0 + + # check that upsampling works as expected + col_ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode="ViT", + ablation_mode="row", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + + ablated = col_ablator.forward(cifar_data, column_pos=10) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, : 10 * 7, :]) == 0 + assert torch.sum(ablated[:, :, 10 * 7 : 14 * 7, :]) > 0 + assert torch.sum(ablated[:, :, 14 * 7 :, :]) == 0 + + # check that the ablation wraps when on the edge of the image + ablated = col_ablator.forward(cifar_data, column_pos=30) + + assert ablated.shape[1] == 4 + assert torch.sum(ablated[:, :, 30 * 7 :, :]) > 0 + assert torch.sum(ablated[:, :, 2 * 7 : 30 * 7, :]) == 0 + assert torch.sum(ablated[:, :, : 2 * 7, :]) > 0 + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") +def test_pytorch_training(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that the training loop for pytorch does not result in errors + """ + import torch + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + try: + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] + + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) + + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) + + head = { + "weight": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_weight.npy", + ) + ) + ).to(device), + "bias": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_bias.npy", + ) + ) + ).to(device), + } + art_model.model.head.load_state_dict(head) + + art_model.fit(cifar_data, cifar_labels, nb_epochs=2, update_batchnorm=True, scheduler=scheduler) + preds = art_model.predict(cifar_data) + + gt_preds = np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + 
"../../utils/resources/models/certification/smooth_vit/cumulative_predictions.npy", + ) + ) + + np.array_equal(preds, gt_preds) + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") +def test_certification_function(art_warning, fix_get_mnist_data, fix_get_cifar10_data): + """ + Check that based on a given set of synthetic class predictions the certification gives the expected results. + """ + from art.estimators.certification.derandomized_smoothing.ablators.pytorch import ColumnAblatorPyTorch + import torch + + try: + col_ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + mode="ViT", + to_reshape=True, # do not upsample initially + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + pred_counts = torch.from_numpy(np.asarray([[20, 5, 1], [10, 5, 1], [1, 16, 1]])) + cert, cert_and_correct, top_predicted_class = col_ablator.certify( + pred_counts=pred_counts, + size_to_certify=4, + label=0, + ) + assert torch.equal(cert, torch.tensor([True, False, True])) + assert torch.equal(cert_and_correct, torch.tensor([True, False, False])) + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") +@pytest.mark.parametrize("ablation", ["block", "column"]) +def test_end_to_end_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): + """ + Assert implementations matches original with a forward pass through the same model architecture. + There are some differences in architecture between the same model names in timm vs the original implementation. + We use vit_base_patch16_224 which matches. + """ + import torch + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + + from art.estimators.certification.derandomized_smoothing.ablators import ( + ColumnAblatorPyTorch, + BlockAblatorPyTorch, + ) + + cifar_data = fix_get_cifar10_data[0][:50] + torch.manual_seed(1234) + try: + art_model = PyTorchDeRandomizedSmoothing( + model="vit_base_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) + + if ablation == "column": + ablator = ColumnAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + mode="ViT", + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + ) + ablated = ablator.forward(cifar_data, column_pos=10) + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/madry_preds_column.pt", + ) + ) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) + + elif ablation == "block": + ablator = BlockAblatorPyTorch( + ablation_size=4, + channels_first=True, + to_reshape=True, + original_shape=(3, 32, 32), + output_shape=(3, 224, 224), + mode="ViT", + ) + ablated = ablator.forward(cifar_data, column_pos=10, row_pos=28) + madry_preds = torch.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/madry_preds_block.pt", + ) + ) + art_preds = art_model.model(ablated) + assert torch.allclose(madry_preds.to(device), art_preds, rtol=1e-04, atol=1e-04) + except ARTTestException as e: + art_warning(e) + + 
+@pytest.mark.only_with_platform("pytorch") +@pytest.mark.parametrize("ablation", ["block", "column"]) +def test_certification_equivalence(art_warning, fix_get_mnist_data, fix_get_cifar10_data, ablation): + """ + With the forward pass equivalence asserted, we now confirm that the certification functions in the same + way by doing a full end to end prediction and certification test over the data. + """ + import torch + + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + try: + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_type=ablation, + ablation_size=4, + load_pretrained=True, + replace_last_layer=True, + verbose=False, + ) + + head = { + "weight": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_weight.npy", + ) + ) + ).to(device), + "bias": torch.tensor( + np.load( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "../../utils/resources/models/certification/smooth_vit/head_bias.npy", + ) + ) + ).to(device), + } + art_model.model.head.load_state_dict(head) + + if torch.cuda.is_available(): + num_to_fetch = 100 + else: + num_to_fetch = 10 + + cifar_data = torch.from_numpy(fix_get_cifar10_data[0][:num_to_fetch]).to(device) + cifar_labels = torch.from_numpy(fix_get_cifar10_data[1][:num_to_fetch]).to(device) + + acc, cert_acc = art_model.eval_and_certify( + x=cifar_data.cpu().numpy(), y=cifar_labels.cpu().numpy(), batch_size=num_to_fetch, size_to_certify=4 + ) + + upsample = torch.nn.Upsample(scale_factor=224 / 32) + cifar_data = upsample(cifar_data) + acc_non_ablation = art_model.model(cifar_data) + acc_non_ablation = art_model.get_accuracy(acc_non_ablation, cifar_labels) + + if torch.cuda.is_available(): + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.29) + assert np.allclose(acc.cpu().numpy(), 0.57) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.16) + assert np.allclose(acc.cpu().numpy(), 0.24) + assert np.allclose(acc_non_ablation, 0.52) + else: + if ablation == "column": + assert np.allclose(cert_acc.cpu().numpy(), 0.30) + assert np.allclose(acc.cpu().numpy(), 0.70) + else: + assert np.allclose(cert_acc.cpu().numpy(), 0.20) + assert np.allclose(acc.cpu().numpy(), 0.20) + assert np.allclose(acc_non_ablation, 0.60) + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.only_with_platform("pytorch") +def test_equivalence(art_warning, fix_get_cifar10_data): + import torch + from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing + from art.estimators.certification.derandomized_smoothing.vision_transformers.pytorch import PyTorchVisionTransformer + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + try: + + class MadrylabImplementations: + """ + Code adapted from the implementation in https://github.com/MadryLab/smoothed-vit + to check against our own functionality. 
+ + Original License: + + MIT License + + Copyright (c) 2021 Madry Lab + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + """ + + def __init__(self): + pass + + @classmethod + def token_dropper(cls, x, mask): + """ + The implementation of dropping tokens has been done slightly differently in this tool. + Here we check that it is equivalent to the original implementation + """ + + class MaskProcessor(torch.nn.Module): + def __init__(self, patch_size=16): + super().__init__() + self.avg_pool = torch.nn.AvgPool2d(patch_size) + + def forward(self, ones_mask): + B = ones_mask.shape[0] + ones_mask = ones_mask[0].unsqueeze(0) # take the first mask + ones_mask = self.avg_pool(ones_mask)[0] + ones_mask = torch.where(ones_mask.view(-1) > 0)[0] + 1 + ones_mask = torch.cat([torch.IntTensor(1).fill_(0).to(device), ones_mask]).unsqueeze(0) + ones_mask = ones_mask.expand(B, -1) + return ones_mask + + mask_processor = MaskProcessor() + patch_mask = mask_processor(mask) + + # x = self.pos_drop(x) # B, N, C + if patch_mask is not None: + # patch_mask is B, K + B, N, C = x.shape + if len(patch_mask.shape) == 1: # not a separate one per batch + x = x[:, patch_mask] + else: + patch_mask = patch_mask.unsqueeze(-1).expand(-1, -1, C) + x = torch.gather(x, 1, patch_mask) + return x + + @classmethod + def embedder(cls, x, pos_embed, cls_token): + """ + NB, original code used the pos embed from the divit rather than vit + (which we pull from our model) which we use here. + + From timm vit: + self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + + From timm dvit: + self.pos_embed = nn.Parameter(torch.zeros(1, + self.patch_embed.num_patches + self.num_prefix_tokens, + self.embed_dim)) + + From repo: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + """ + x = torch.cat((cls_token.expand(x.shape[0], -1, -1), x), dim=1) + return x + pos_embed + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + """ + This is a copy of the function in ArtViT.forward_features + except we also perform an equivalence assertion compared to the implementation + in https://github.com/MadryLab/smoothed-vit (see MadrylabImplementations class above) + + The forward pass of the ViT. + + :param x: Input data. 
+ :return: The input processed by the ViT backbone + """ + import copy + + ablated_input = False + if x.shape[1] == self.in_chans + 1: + ablated_input = True + + if ablated_input: + x, ablation_mask = x[:, : self.in_chans], x[:, self.in_chans : self.in_chans + 1] + + x = self.patch_embed(x) + + madry_embed = MadrylabImplementations.embedder(copy.copy(x), self.pos_embed, self.cls_token) + x = self._pos_embed(x) + assert torch.equal(madry_embed, x) + + # pass the x into the token dropping code + madry_dropped = MadrylabImplementations.token_dropper(copy.copy(x), ablation_mask) + + if self.to_drop_tokens and ablated_input: + ones = self.ablation_mask_embedder(ablation_mask) + to_drop = torch.sum(ones, dim=2) + indexes = torch.gt(torch.where(to_drop > 1, 1, 0), 0) + x = self.drop_tokens(x, indexes) + + assert torch.equal(madry_dropped, x) + + x = self.norm_pre(x) + x = self.blocks(x) + + return self.norm(x) + + # Replace the forward_features with the forward_features code with checks. + PyTorchVisionTransformer.forward_features = forward_features + + art_model = PyTorchDeRandomizedSmoothing( + model="vit_small_patch16_224", + loss=torch.nn.CrossEntropyLoss(), + optimizer=torch.optim.SGD, + optimizer_params={"lr": 0.01}, + input_shape=(3, 32, 32), + nb_classes=10, + ablation_size=4, + load_pretrained=False, + replace_last_layer=True, + verbose=False, + ) + + cifar_data = fix_get_cifar10_data[0][:50] + cifar_labels = fix_get_cifar10_data[1][:50] + + scheduler = torch.optim.lr_scheduler.MultiStepLR(art_model.optimizer, milestones=[1], gamma=0.1) + art_model.fit(cifar_data, cifar_labels, nb_epochs=1, update_batchnorm=True, scheduler=scheduler, batch_size=128) + except ARTTestException as e: + art_warning(e) diff --git a/utils/resources/models/certification/smooth_vit/cumulative_predictions.npy b/utils/resources/models/certification/smooth_vit/cumulative_predictions.npy new file mode 100644 index 0000000000..71c585b2c5 Binary files /dev/null and b/utils/resources/models/certification/smooth_vit/cumulative_predictions.npy differ diff --git a/utils/resources/models/certification/smooth_vit/head_bias.npy b/utils/resources/models/certification/smooth_vit/head_bias.npy new file mode 100644 index 0000000000..340c4215be Binary files /dev/null and b/utils/resources/models/certification/smooth_vit/head_bias.npy differ diff --git a/utils/resources/models/certification/smooth_vit/head_weight.npy b/utils/resources/models/certification/smooth_vit/head_weight.npy new file mode 100644 index 0000000000..2f718d5fbf Binary files /dev/null and b/utils/resources/models/certification/smooth_vit/head_weight.npy differ diff --git a/utils/resources/models/certification/smooth_vit/madry_preds_block.pt b/utils/resources/models/certification/smooth_vit/madry_preds_block.pt new file mode 100644 index 0000000000..f1d0fb862e Binary files /dev/null and b/utils/resources/models/certification/smooth_vit/madry_preds_block.pt differ diff --git a/utils/resources/models/certification/smooth_vit/madry_preds_column.pt b/utils/resources/models/certification/smooth_vit/madry_preds_column.pt new file mode 100644 index 0000000000..ddb01f7cb0 Binary files /dev/null and b/utils/resources/models/certification/smooth_vit/madry_preds_column.pt differ
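Taken together, the pieces added in this diff (the timm dependency, the ViT-backed PyTorchDeRandomizedSmoothing estimator, the new ablators, and the test resources) compose into the workflow sketched below. This is assembled from the notebook cell and the tests above rather than code shipped in the PR; the argument values are the ones used there, and fine-tuning on a small slice of the test split is only to keep the example quick.

import numpy as np
import torch

from art.utils import load_dataset
from art.estimators.certification.derandomized_smoothing import PyTorchDeRandomizedSmoothing

# CIFAR-10 in channels-first format, mirroring the new test fixtures.
(_, _), (x_test, y_test), _, _ = load_dataset("cifar10")
x_test = np.transpose(x_test, (0, 3, 1, 2)).astype(np.float32)
y_test = np.argmax(y_test, axis=1)
x_small, y_small = x_test[:128], y_test[:128]

art_model = PyTorchDeRandomizedSmoothing(
    model="vit_small_patch16_224",   # timm ViT name, as in the notebook and tests
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.SGD,       # the class is supplied for the ViT path, not an instance
    optimizer_params={"lr": 0.01},
    input_shape=(3, 32, 32),         # CIFAR shape; inputs are rescaled to the ViT's 224 x 224
    nb_classes=10,
    ablation_type="column",
    ablation_size=4,
    replace_last_layer=True,
    load_pretrained=True,
    verbose=False,
)

# Brief fine-tune followed by joint clean/certified accuracy, as in the tests.
art_model.fit(x_small, y_small, nb_epochs=1, update_batchnorm=True)
acc, cert_acc = art_model.eval_and_certify(x_small, y_small, size_to_certify=4)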