Commit
Merge branch 'dev_1.16.0' into dev_1.16.0_mi_improvements
beat-buesser authored Sep 13, 2023
2 parents 95d1480 + 7f33be1 commit 5d0fc3b
Showing 12 changed files with 960 additions and 83 deletions.
15 changes: 15 additions & 0 deletions art/attacks/attack.py
@@ -159,6 +159,8 @@ def set_params(self, **kwargs) -> None:
for key, value in kwargs.items():
if key in self.attack_params:
setattr(self, key, value)
else:
raise ValueError(f'The attribute "{key}" cannot be set for this attack.')
self._check_params()

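Note for downstream users: set_params now raises instead of silently ignoring unknown keys. A minimal sketch of the new behavior (the classifier and the parameter values are illustrative):

from art.attacks.evasion import FastGradientMethod

attack = FastGradientMethod(estimator=classifier)  # assumes an existing ART classifier
attack.set_params(eps=0.2)      # "eps" is listed in attack_params, so this succeeds
attack.set_params(epsilon=0.2)  # unknown key -> ValueError under the new check
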
def _check_params(self) -> None:
@@ -186,6 +188,19 @@ def is_estimator_valid(estimator, estimator_requirements) -> bool:
return False
return True

def __repr__(self):
"""
Returns a string describing the attack class and attack_params
"""
param_str = ""
for param in self.attack_params:
if hasattr(self, param):
param_str += f"{param}={getattr(self, param)}, "
elif hasattr(self, "_attack"):
if hasattr(self._attack, param):
param_str += f"{param}={getattr(self._attack, param)}, "
return f"{type(self).__name__}({param_str})"

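With this, printing an attack instance lists its configured parameters. Roughly, for an illustrative FastGradientMethod (exact fields depend on the ART version, and the string keeps a trailing ", " from the loop above):

print(repr(FastGradientMethod(estimator=classifier, eps=0.2)))
# FastGradientMethod(norm=inf, eps=0.2, eps_step=0.1, targeted=False, num_random_init=0, batch_size=32, minimal=False, )
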

class EvasionAttack(Attack):
"""
199 changes: 129 additions & 70 deletions art/attacks/evasion/auto_attack.py
@@ -21,18 +21,19 @@
| Paper link: https://arxiv.org/abs/2003.01690
"""
import logging
from typing import List, Optional, Union, Tuple, TYPE_CHECKING
from copy import deepcopy
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from art.config import ART_NUMPY_DTYPE
from art.attacks.attack import EvasionAttack
from art.attacks.evasion.auto_projected_gradient_descent import AutoProjectedGradientDescent
from art.attacks.evasion.deepfool import DeepFool
from art.attacks.evasion.square_attack import SquareAttack
from art.estimators.estimator import BaseEstimator
from art.config import ART_NUMPY_DTYPE
from art.estimators.classification.classifier import ClassifierMixin
from art.utils import get_labels_np_array, check_and_transform_label_format
from art.estimators.estimator import BaseEstimator
from art.utils import check_and_transform_label_format, get_labels_np_array

if TYPE_CHECKING:
from art.utils import CLASSIFIER_TYPE
@@ -55,6 +56,7 @@ class AutoAttack(EvasionAttack):
"batch_size",
"estimator_orig",
"targeted",
"parallel",
]

_estimator_requirements = (BaseEstimator, ClassifierMixin)
@@ -69,6 +71,7 @@ def __init__(
batch_size: int = 32,
estimator_orig: Optional["CLASSIFIER_TYPE"] = None,
targeted: bool = False,
parallel: bool = False,
):
"""
Create a :class:`.AutoAttack` instance.
@@ -83,6 +86,7 @@ def __init__(
:param estimator_orig: Original estimator to be attacked by adversarial examples.
:param targeted: If False run only untargeted attacks, if True also run targeted attacks against each possible
target.
:param parallel: If True run attacks in parallel.
"""
super().__init__(estimator=estimator)

@@ -140,6 +144,7 @@ def __init__(
self.estimator_orig = estimator

self._targeted = targeted
self.parallel = parallel
self._check_params()

def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
@@ -157,6 +162,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
:type mask: `np.ndarray`
:return: An array holding the adversarial examples.
"""
import multiprocess  # dill-based fork of multiprocessing, so the attack objects sent to the pool can be pickled

x_adv = x.astype(ART_NUMPY_DTYPE)
if y is not None:
y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes)
@@ -168,6 +175,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
y_pred = self.estimator_orig.predict(x.astype(ART_NUMPY_DTYPE))
sample_is_robust = np.argmax(y_pred, axis=1) == np.argmax(y, axis=1)

args = []
# Untargeted attacks
for attack in self.attacks:

@@ -178,13 +186,29 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
if attack.targeted:
attack.set_params(targeted=False)

x_adv, sample_is_robust = self._run_attack(
x=x_adv,
y=y,
sample_is_robust=sample_is_robust,
attack=attack,
**kwargs,
)
if self.parallel:
args.append(
(
deepcopy(x_adv),
deepcopy(y),
deepcopy(sample_is_robust),
deepcopy(attack),
deepcopy(self.estimator),
deepcopy(self.norm),
deepcopy(self.eps),
)
)
else:
x_adv, sample_is_robust = run_attack(
x=x_adv,
y=y,
sample_is_robust=sample_is_robust,
attack=attack,
estimator_orig=self.estimator,
norm=self.norm,
eps=self.eps,
**kwargs,
)

# Targeted attacks
if self.targeted:
@@ -197,10 +221,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:

for attack in self.attacks:

if attack.targeted is not None:

if not attack.targeted:
attack.set_params(targeted=True)
try:
attack.set_params(targeted=True)

for i in range(self.estimator.nb_classes - 1):
# Stop if all samples are misclassified
@@ -211,64 +233,46 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
targeted_labels[:, i], nb_classes=self.estimator.nb_classes
)

x_adv, sample_is_robust = self._run_attack(
x=x_adv,
y=target,
sample_is_robust=sample_is_robust,
attack=attack,
**kwargs,
)
if self.parallel:
args.append(
(
deepcopy(x_adv),
deepcopy(target),
deepcopy(sample_is_robust),
deepcopy(attack),
deepcopy(self.estimator),
deepcopy(self.norm),
deepcopy(self.eps),
)
)
else:
x_adv, sample_is_robust = run_attack(
x=x_adv,
y=target,
sample_is_robust=sample_is_robust,
attack=attack,
estimator_orig=self.estimator,
norm=self.norm,
eps=self.eps,
**kwargs,
)
except ValueError as error:
logger.warning("Error completing attack: %s", str(error))

if self.parallel:
with multiprocess.get_context("spawn").Pool() as pool:
# Results come back in the order that they were issued
results = pool.starmap(run_attack, args)
perturbations = []
is_robust = []
for img_idx in range(len(x)):
perturbations.append(np.array([np.linalg.norm(x[img_idx] - i[0][img_idx]) for i in results]))
is_robust.append([i[1][img_idx] for i in results])
best_attacks = np.argmin(np.where(np.invert(np.array(is_robust)), np.array(perturbations), np.inf), axis=1)
x_adv = np.concatenate([results[best_attacks[img]][0][[img]] for img in range(len(x))])

return x_adv

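The block above keeps, for every sample, the successful attack whose perturbation is smallest; masking failed attacks to np.inf before the argmin is what makes that work. A toy numpy sketch of the same selection (shapes and values are illustrative; np.where(is_robust, np.inf, p) is equivalent to the np.invert form used above):

import numpy as np

perturbations = np.array([[0.30, 0.20, 0.25],   # sample 0 vs. attacks 0..2
                          [0.10, 0.50, 0.40]])  # sample 1 vs. attacks 0..2
is_robust = np.array([[False, False, True],     # True = that attack failed on that sample
                      [True,  False, False]])

best_attacks = np.argmin(np.where(is_robust, np.inf, perturbations), axis=1)
print(best_attacks)  # [1 2]: attack 1 wins on sample 0, attack 2 on sample 1
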
def _run_attack(
self,
x: np.ndarray,
y: np.ndarray,
sample_is_robust: np.ndarray,
attack: EvasionAttack,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Run attack.
:param x: An array of the original inputs.
:param y: An array of the labels.
:param sample_is_robust: Store the initial robustness of examples.
:param attack: Evasion attack to run.
:return: A tuple of the adversarial examples and an updated array marking which samples remain robust.
"""
# Attack only correctly classified samples
x_robust = x[sample_is_robust]
y_robust = y[sample_is_robust]

# Generate adversarial examples
x_robust_adv = attack.generate(x=x_robust, y=y_robust, **kwargs)
y_pred_robust_adv = self.estimator_orig.predict(x_robust_adv)

# Check and update successful examples
rel_acc = 1e-4
order = np.inf if self.norm == "inf" else self.norm
norm_is_smaller_eps = (1 - rel_acc) * np.linalg.norm(
(x_robust_adv - x_robust).reshape((x_robust_adv.shape[0], -1)), axis=1, ord=order
) <= self.eps

if attack.targeted:
samples_misclassified = np.argmax(y_pred_robust_adv, axis=1) == np.argmax(y_robust, axis=1)
elif not attack.targeted:
samples_misclassified = np.argmax(y_pred_robust_adv, axis=1) != np.argmax(y_robust, axis=1)
else: # pragma: no cover
raise ValueError

sample_is_not_robust = np.logical_and(samples_misclassified, norm_is_smaller_eps)

x_robust[sample_is_not_robust] = x_robust_adv[sample_is_not_robust]
x[sample_is_robust] = x_robust

sample_is_robust[sample_is_robust] = np.invert(sample_is_not_robust)

return x, sample_is_robust

def _check_params(self) -> None:
if self.norm not in [1, 2, np.inf, "inf"]:
raise ValueError('The argument norm has to be either 1, 2, np.inf, or "inf".')
@@ -281,3 +285,58 @@ def _check_params(self) -> None:

if not isinstance(self.batch_size, int) or self.batch_size <= 0:
raise ValueError("The argument batch_size has to be of type int and larger than zero.")


def run_attack(
x: np.ndarray,
y: np.ndarray,
sample_is_robust: np.ndarray,
attack: EvasionAttack,
estimator_orig: "CLASSIFIER_TYPE",
norm: Union[int, float, str] = np.inf,
eps: float = 0.3,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Run attack.
:param x: An array of the original inputs.
:param y: An array of the labels.
:param sample_is_robust: Store the initial robustness of examples.
:param attack: Evasion attack to run.
:param estimator_orig: Original estimator to be attacked by adversarial examples.
:param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2.
:param eps: Maximum perturbation that the attacker can introduce.
:return: A tuple of the adversarial examples and an updated array marking which samples remain robust.
"""
# Attack only correctly classified samples
x_robust = x[sample_is_robust]
y_robust = y[sample_is_robust]

# Generate adversarial examples
x_robust_adv = attack.generate(x=x_robust, y=y_robust, **kwargs)
y_pred_robust_adv = estimator_orig.predict(x_robust_adv)

# Check and update successful examples
rel_acc = 1e-4
order = np.inf if norm == "inf" else norm
assert isinstance(order, (int, float))
norm_is_smaller_eps = (1 - rel_acc) * np.linalg.norm(
(x_robust_adv - x_robust).reshape((x_robust_adv.shape[0], -1)), axis=1, ord=order
) <= eps

if attack.targeted:
samples_misclassified = np.argmax(y_pred_robust_adv, axis=1) == np.argmax(y_robust, axis=1)
elif not attack.targeted:
samples_misclassified = np.argmax(y_pred_robust_adv, axis=1) != np.argmax(y_robust, axis=1)
else: # pragma: no cover
raise ValueError

sample_is_not_robust = np.logical_and(samples_misclassified, norm_is_smaller_eps)

x_robust[sample_is_not_robust] = x_robust_adv[sample_is_not_robust]
x[sample_is_robust] = x_robust

sample_is_robust[sample_is_robust] = np.invert(sample_is_not_robust)

return x, sample_is_robust
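
Taken together, a hedged end-to-end sketch of the new option (classifier, x_test, and y_test are assumed to exist; with parallel=True every queued attack runs in a spawned worker process and the per-sample best result is kept):

import numpy as np
from art.attacks.evasion import AutoAttack

attack = AutoAttack(
    estimator=classifier,
    norm=np.inf,
    eps=0.3,
    eps_step=0.1,
    batch_size=32,
    targeted=False,
    parallel=True,  # dispatch the collected attack arguments through multiprocess.Pool.starmap
)
x_adv = attack.generate(x=x_test, y=y_test)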
art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py
@@ -55,7 +55,7 @@ class ProjectedGradientDescentCommon(FastGradientMethod):
| Paper link: https://arxiv.org/abs/1706.06083
"""

attack_params = FastGradientMethod.attack_params + ["max_iter", "random_eps", "verbose"]
attack_params = FastGradientMethod.attack_params + ["decay", "max_iter", "random_eps", "verbose"]
_estimator_requirements = (BaseEstimator, LossGradientsMixin)

def __init__(
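Registering "decay" in attack_params also makes the momentum factor settable via set_params. A sketch, assuming the constructor accepts the new keyword introduced by the momentum-iterative work on this branch:

import numpy as np
from art.attacks.evasion import ProjectedGradientDescent

attack = ProjectedGradientDescent(
    estimator=classifier,  # assumes an ART classifier with loss gradients
    norm=np.inf,
    eps=0.3,
    eps_step=0.1,
    max_iter=40,
    decay=0.9,  # assumed momentum decay factor; omitting it (None) gives plain PGD
)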
12 changes: 8 additions & 4 deletions art/defences/trainer/adversarial_trainer.py
@@ -123,7 +123,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs):
logged = False
self._precomputed_adv_samples = []
for attack in tqdm(self.attacks, desc="Precompute adversarial examples."):
attack.set_params(verbose=False)
if "verbose" in attack.attack_params:
attack.set_params(verbose=False)
if "targeted" in attack.attack_params and attack.targeted: # type: ignore
raise NotImplementedError("Adversarial training with targeted attacks is currently not implemented")

@@ -155,7 +156,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs):

# Choose indices to replace with adversarial samples
attack = self.attacks[attack_id]
attack.set_params(verbose=False)
if "verbose" in attack.attack_params:
attack.set_params(verbose=False)

# If source and target models are the same, craft fresh adversarial samples
if attack.estimator == self._classifier:
@@ -210,7 +212,8 @@ def fit( # pylint: disable=W0221
logged = False
self._precomputed_adv_samples = []
for attack in tqdm(self.attacks, desc="Precompute adv samples"):
attack.set_params(verbose=False)
if "verbose" in attack.attack_params:
attack.set_params(verbose=False)
if "targeted" in attack.attack_params and attack.targeted: # type: ignore
raise NotImplementedError("Adversarial training with targeted attacks is currently not implemented")

@@ -234,7 +237,8 @@ def fit( # pylint: disable=W0221
# Choose indices to replace with adversarial samples
nb_adv = int(np.ceil(self.ratio * x_batch.shape[0]))
attack = self.attacks[attack_id]
attack.set_params(verbose=False)
if "verbose" in attack.attack_params:
attack.set_params(verbose=False)
if self.ratio < 1:
adv_ids = np.random.choice(x_batch.shape[0], size=nb_adv, replace=False)
else:
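The repeated guard matters because not every attack declares verbose; FastGradientMethod, for instance, appears not to list it in attack_params, so the old unconditional set_params(verbose=False) would now raise. A minimal illustration of the pattern:

from art.attacks.evasion import FastGradientMethod, ProjectedGradientDescent

for attack in [FastGradientMethod(classifier), ProjectedGradientDescent(classifier)]:
    # under the stricter set_params, unknown keys raise ValueError, so check first
    if "verbose" in attack.attack_params:
        attack.set_params(verbose=False)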
8 changes: 5 additions & 3 deletions art/estimators/classification/pytorch.py
@@ -1047,9 +1047,10 @@ def save(self, filename: str, path: Optional[str] = None) -> None:
# pylint: disable=W0212
# disable pylint because access to _modules required
torch.save(self._model._model.state_dict(), full_path + ".model")
torch.save(self._optimizer.state_dict(), full_path + ".optimizer") # type: ignore
if self._optimizer is not None:
torch.save(self._optimizer.state_dict(), full_path + ".optimizer") # type: ignore
logger.info("Optimizer state dict saved in path: %s.", full_path + ".optimizer")
logger.info("Model state dict saved in path: %s.", full_path + ".model")
logger.info("Optimizer state dict saved in path: %s.", full_path + ".optimizer")

def __getstate__(self) -> Dict[str, Any]:
"""
@@ -1094,7 +1095,8 @@ def __setstate__(self, state: Dict[str, Any]) -> None:
self._model.to(self._device)

# Recover optimizer
self._optimizer.load_state_dict(torch.load(str(full_path) + ".optimizer")) # type: ignore
if os.path.isfile(str(full_path) + ".optimizer"):
self._optimizer.load_state_dict(torch.load(str(full_path) + ".optimizer")) # type: ignore

self.__dict__.pop("model_name", None)
self.__dict__.pop("inner_model", None)
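A short sketch of the case these guards enable: saving and restoring a PyTorchClassifier built without an optimizer (the model, loss, and shapes are illustrative):

from art.estimators.classification import PyTorchClassifier

classifier = PyTorchClassifier(
    model=model,      # assumes a torch.nn.Module
    loss=loss_fn,     # assumes e.g. torch.nn.CrossEntropyLoss()
    input_shape=(1, 28, 28),
    nb_classes=10,
    # no optimizer passed: save() now writes only the ".model" file,
    # and __setstate__ only loads ".optimizer" if that file exists
)
classifier.save("my_classifier", path="/tmp")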
4 changes: 2 additions & 2 deletions art/metrics/metrics.py
@@ -46,11 +46,11 @@
SUPPORTED_METHODS: Dict[str, Dict[str, Any]] = {
"auto": {
"class": AutoAttack,
"params": {"eps_step": 0.1, "eps_max": 1.0},
"params": {"eps_step": 0.1},
},
"fgsm": {
"class": FastGradientMethod,
"params": {"eps_step": 0.1, "eps_max": 1.0, "clip_min": 0.0, "clip_max": 1.0},
"params": {"eps_step": 0.1},
},
"hsj": {
"class": HopSkipJump,
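These parameter dictionaries are consumed when the metrics module instantiates the attacks; dropping the "eps_max" key, which the attacks do not declare, keeps the defaults compatible with the stricter set_params. A sketch of the usual entry point (assuming the empirical_robustness signature is unchanged):

from art.metrics import empirical_robustness

# builds a FastGradientMethod with the trimmed defaults above ({"eps_step": 0.1})
# and returns the average minimal perturbation found on x_test
score = empirical_robustness(classifier, x_test, attack_name="fgsm")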
