From a426e1a0c32072d213f8916a5f2c95a9335b5bb1 Mon Sep 17 00:00:00 2001 From: "penghouwen@icloud.com" Date: Mon, 20 Jul 2020 12:29:54 +0800 Subject: [PATCH 01/62] integrate CREAM NAS algorithm --- docs/en_US/NAS/Cream.md | 79 ++ examples/nas/cream/Cream.md | 94 +++ examples/nas/cream/__init__.py | 0 examples/nas/cream/dataset/__init__.py | 3 + examples/nas/cream/dataset/auto_augment.py | 789 ++++++++++++++++++ examples/nas/cream/dataset/base_dataset.py | 197 +++++ examples/nas/cream/dataset/loader.py | 240 ++++++ examples/nas/cream/dataset/processing.py | 204 +++++ examples/nas/cream/dataset/tiny_imagenet.py | 166 ++++ examples/nas/cream/dataset/transform.py | 182 ++++ examples/nas/cream/dataset/utils.py | 303 +++++++ examples/nas/cream/distributed_test.sh | 4 + examples/nas/cream/distributed_train.sh | 4 + examples/nas/cream/models/__init__.py | 0 examples/nas/cream/models/builder.py | 392 +++++++++ examples/nas/cream/models/hbuilder.py | 417 +++++++++ examples/nas/cream/models/hypernet.py | 307 +++++++ examples/nas/cream/models/model.py | 159 ++++ examples/nas/cream/models/resunit.py | 92 ++ examples/nas/cream/models/units.py | 355 ++++++++ examples/nas/cream/models/utils.py | 123 +++ examples/nas/cream/requirements.txt | 12 + examples/nas/cream/run.sh | 6 + examples/nas/cream/supernet.py | 389 +++++++++ examples/nas/cream/test.py | 482 +++++++++++ examples/nas/cream/test.sh | 2 + examples/nas/cream/utils/EMA.py | 66 ++ examples/nas/cream/utils/__init__.py | 0 examples/nas/cream/utils/flops_table.py | 77 ++ examples/nas/cream/utils/helpers.py | 169 ++++ examples/nas/cream/utils/loss.py | 31 + examples/nas/cream/utils/optimizer.py | 162 ++++ examples/nas/cream/utils/saver.py | 140 ++++ examples/nas/cream/utils/scheduler.py | 309 +++++++ .../pynni/nni/nas/pytorch/cream/__init__.py | 6 + .../pynni/nni/nas/pytorch/cream/mutator.py | 67 ++ .../pynni/nni/nas/pytorch/cream/trainer.py | 312 +++++++ 37 files changed, 6340 insertions(+) create mode 100644 docs/en_US/NAS/Cream.md create mode 100644 examples/nas/cream/Cream.md create mode 100755 examples/nas/cream/__init__.py create mode 100755 examples/nas/cream/dataset/__init__.py create mode 100755 examples/nas/cream/dataset/auto_augment.py create mode 100755 examples/nas/cream/dataset/base_dataset.py create mode 100755 examples/nas/cream/dataset/loader.py create mode 100755 examples/nas/cream/dataset/processing.py create mode 100755 examples/nas/cream/dataset/tiny_imagenet.py create mode 100755 examples/nas/cream/dataset/transform.py create mode 100755 examples/nas/cream/dataset/utils.py create mode 100755 examples/nas/cream/distributed_test.sh create mode 100755 examples/nas/cream/distributed_train.sh create mode 100755 examples/nas/cream/models/__init__.py create mode 100755 examples/nas/cream/models/builder.py create mode 100755 examples/nas/cream/models/hbuilder.py create mode 100755 examples/nas/cream/models/hypernet.py create mode 100755 examples/nas/cream/models/model.py create mode 100755 examples/nas/cream/models/resunit.py create mode 100755 examples/nas/cream/models/units.py create mode 100755 examples/nas/cream/models/utils.py create mode 100755 examples/nas/cream/requirements.txt create mode 100755 examples/nas/cream/run.sh create mode 100755 examples/nas/cream/supernet.py create mode 100755 examples/nas/cream/test.py create mode 100755 examples/nas/cream/test.sh create mode 100755 examples/nas/cream/utils/EMA.py create mode 100755 examples/nas/cream/utils/__init__.py create mode 100755 examples/nas/cream/utils/flops_table.py create 
mode 100755 examples/nas/cream/utils/helpers.py
 create mode 100755 examples/nas/cream/utils/loss.py
 create mode 100755 examples/nas/cream/utils/optimizer.py
 create mode 100755 examples/nas/cream/utils/saver.py
 create mode 100755 examples/nas/cream/utils/scheduler.py
 create mode 100755 src/sdk/pynni/nni/nas/pytorch/cream/__init__.py
 create mode 100755 src/sdk/pynni/nni/nas/pytorch/cream/mutator.py
 create mode 100755 src/sdk/pynni/nni/nas/pytorch/cream/trainer.py

diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md
new file mode 100644
index 0000000000..2f2d4573ce
--- /dev/null
+++ b/docs/en_US/NAS/Cream.md
@@ -0,0 +1,79 @@
+# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search
+
+## Introduction
+One-shot weight sharing methods have recently drawn great attention in neural architecture search due to their high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training
+of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training
+process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized
+paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising
+one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify that such a path distillation method can improve the
+convergence ratio and performance of the hypernetwork, as well as boost the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned
+settings. Moreover, the experiments on object detection and a more challenging search space show the generality and robustness of the proposed method.
+
+## Reproduction Results
+
+## Examples
+
+[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream)
+
+## Requirements
+* python >= 3.6
+* torch >= 1.2
+* torchscope
+* apex (optional; please make sure your nvcc CUDA version is the same as the PyTorch CUDA version)
+
+## Data Preparation
+You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you could use the following script:
+
+Put the ImageNet data in `${Root}/data`. It should look like the following:
+```buildoutcfg
+${Root}/data/imagenet/train
+${Root}/data/imagenet/val
+...
+```
+
+
+## Quick Start
+
+### I. Search
+
+First, build the environment for searching.
+```
+pip install -r ./examples/nas/cream/requirements.txt
+```
+
+To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired range of model FLOPs, such as [0, 600]M FLOPs. You can specify the FLOPs interval by changing these two parameters in `./examples/nas/cream/supernet.sh`:
+```buildoutcfg
+--flops_minimum 0 # Minimum FLOPs of Architecture
+--flops_maximum 600 # Maximum FLOPs of Architecture
+```
+
+After you specify the FLOPs range of the architectures you would like to search, you can search for an architecture by running:
+```buildoutcfg
+sh ./experiments/scripts/supernet.sh
+
+```
+
+### II. Test
+To test our trained models, you need to use `model_selection` in `./examples/nas/cream/test.sh` to specify which model to test.
+```buildoutcfg
+--model_selection 42 # test the 42M model
+--model_selection 470 # test the 470M model
+......
+```
+
+After specifying the FLOPs of the model, you need to write the path of the checkpoint to resume in `./examples/nas/cream/test.sh`.
+```buildoutcfg
+--resume './experiments/ckps/42.pth.tar'
+--resume './experiments/ckps/470.pth.tar'
+......
+```
+
+We provide 14M/42M/114M/285M/470M/600M pretrained models on [Google Drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2).
+After downloading the pretrained models and adding `--model_selection` and `--resume` to `./experiments/scripts/test.sh`, you can test a model with the following command:
+```buildoutcfg
+sh ./experiments/scripts/test.sh
+```
+
+The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it.
+
+
diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md
new file mode 100644
index 0000000000..cd38d38542
--- /dev/null
+++ b/examples/nas/cream/Cream.md
@@ -0,0 +1,94 @@
+# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search
+
+## Introduction
+One-shot weight sharing methods have recently drawn great attention in neural architecture search due to their high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training
+of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training
+process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized
+paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising
+one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify that such a path distillation method can improve the
+convergence ratio and performance of the hypernetwork, as well as boost the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned
+settings. Moreover, the experiments on object detection and a more challenging search space show the generality and robustness of the proposed method.
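The following is a minimal, self-contained toy sketch of the prioritized-path idea described above, not the code added by this patch (the actual logic lives in `examples/nas/cream/supernet.py` and `src/sdk/pynni/nni/nas/pytorch/cream/trainer.py`). It only illustrates how a small board of prioritized paths could be refreshed on the fly by performance under a FLOPs constraint and how the final architecture is simply the top prioritized path; the names `update_board`, `BOARD_SIZE`, and the random accuracy/FLOPs stand-ins are invented for illustration.

```python
import random

BOARD_SIZE = 10        # number of prioritized paths kept on the fly
FLOPS_MAXIMUM = 600e6  # complexity constraint, cf. --flops_maximum in supernet.sh

def update_board(board, path_id, acc, flops):
    """Insert a newly evaluated path and keep only the best-performing ones."""
    if flops > FLOPS_MAXIMUM:
        return board                      # too complex, never prioritized
    board.append({"path": path_id, "acc": acc, "flops": flops})
    board.sort(key=lambda p: p["acc"], reverse=True)
    return board[:BOARD_SIZE]

board = []
for step in range(1000):
    # stand-ins for sampling a subnetwork from the hypernetwork and measuring it;
    # during real training, a teacher drawn from `board` would also provide a
    # distillation target for the sampled subnetwork.
    acc = random.uniform(0.5, 0.8)
    flops = random.uniform(100e6, 800e6)
    board = update_board(board, path_id=step, acc=acc, flops=flops)

best = board[0]  # the most promising prioritized path becomes the final architecture
print(best)
```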
+
+## Reproduction Results
+
+## Examples
+
+[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream)
+
+## Requirements
+* python >= 3.6
+* torch >= 1.2
+* torchscope
+* apex (optional; please make sure your nvcc CUDA version is the same as the PyTorch CUDA version)
+
+## Data Preparation
+You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you could use the following script:
+
+Put the ImageNet data in `${Root}/data`. It should look like the following:
+```buildoutcfg
+${Root}/data/imagenet/train
+${Root}/data/imagenet/val
+...
+```
+
+
+## Quick Start
+
+### I. Search
+
+First, build the environment for searching.
+```
+pip install -r ./examples/nas/cream/requirements.txt
+```
+
+To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired range of model FLOPs, such as [0, 600]M FLOPs. You can specify the FLOPs interval by changing these two parameters in `./examples/nas/cream/supernet.sh`:
+```buildoutcfg
+--flops_minimum 0 # Minimum FLOPs of Architecture
+--flops_maximum 600 # Maximum FLOPs of Architecture
+```
+
+After you specify the FLOPs range of the architectures you would like to search, you can search for an architecture by running:
+```buildoutcfg
+sh ./experiments/scripts/supernet.sh
+
+```
+
+### II. Test
+To test our trained models, you need to use `model_selection` in `./examples/nas/cream/test.sh` to specify which model to test.
+```buildoutcfg
+--model_selection 42 # test the 42M model
+--model_selection 470 # test the 470M model
+......
+```
+
+After specifying the FLOPs of the model, you need to write the path of the checkpoint to resume in `./examples/nas/cream/test.sh`.
+```buildoutcfg
+--resume './experiments/ckps/42.pth.tar'
+--resume './experiments/ckps/470.pth.tar'
+......
+```
+
+We provide 14M/42M/114M/285M/470M/600M pretrained models on [Google Drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2).
+After downloading the pretrained models and adding `--model_selection` and `--resume` to `./experiments/scripts/test.sh`, you can test a model with the following command:
+```buildoutcfg
+sh ./experiments/scripts/test.sh
+```
+
+The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it.
+
+
+### PyTorch
+
+```eval_rst
+.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer
+    :members:
+
+.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator
+    :members:
+
+.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator
+    :members:
+
+..
autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel + :members: +``` \ No newline at end of file diff --git a/examples/nas/cream/__init__.py b/examples/nas/cream/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/nas/cream/dataset/__init__.py b/examples/nas/cream/dataset/__init__.py new file mode 100755 index 0000000000..14620b5d4d --- /dev/null +++ b/examples/nas/cream/dataset/__init__.py @@ -0,0 +1,3 @@ +from dataset.loader import create_loader +from dataset.base_dataset import Dataset, AugMixDataset +from dataset.utils import resolve_data_config \ No newline at end of file diff --git a/examples/nas/cream/dataset/auto_augment.py b/examples/nas/cream/dataset/auto_augment.py new file mode 100755 index 0000000000..ce8c1f02b5 --- /dev/null +++ b/examples/nas/cream/dataset/auto_augment.py @@ -0,0 +1,789 @@ +import random +import math +import re +from PIL import Image, ImageOps, ImageEnhance, ImageChops +import PIL +import numpy as np + + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) + +_FILL = (128, 128, 128) + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. +_MAX_LEVEL = 10. + +_HPARAMS_DEFAULT = dict( + translate_const=250, + img_mean=_FILL, +) + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop('resample', Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if 'fillcolor' in kwargs and _PIL_VER < (5, 0): + kwargs.pop('fillcolor') + kwargs['resample'] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs['resample']) + + +def auto_contrast(img, **__): + return 
ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30. + level = _randomly_negate(level) + return level, + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return (level / _MAX_LEVEL) * 1.8 + 0.1, + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * .9 + level = 1.0 + _randomly_negate(level) + return level, + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return level, + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams['translate_const'] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return level, + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get('translate_pct', 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return level, + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 4), + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return 4 - _posterize_level_to_arg(level, hparams)[0], + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 4) + 4, + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / _MAX_LEVEL) * 256), + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - _solarize_level_to_arg(level, _hparams)[0], + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return int((level / 
_MAX_LEVEL) * 110), + + +LEVEL_TO_ARG = { + 'AutoContrast': None, + 'Equalize': None, + 'Invert': None, + 'Rotate': _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + 'Posterize': _posterize_level_to_arg, + 'PosterizeIncreasing': _posterize_increasing_level_to_arg, + 'PosterizeOriginal': _posterize_original_level_to_arg, + 'Solarize': _solarize_level_to_arg, + 'SolarizeIncreasing': _solarize_increasing_level_to_arg, + 'SolarizeAdd': _solarize_add_level_to_arg, + 'Color': _enhance_level_to_arg, + 'ColorIncreasing': _enhance_increasing_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'ContrastIncreasing': _enhance_increasing_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'BrightnessIncreasing': _enhance_increasing_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'SharpnessIncreasing': _enhance_increasing_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'TranslateX': _translate_abs_level_to_arg, + 'TranslateY': _translate_abs_level_to_arg, + 'TranslateXRel': _translate_rel_level_to_arg, + 'TranslateYRel': _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + 'AutoContrast': auto_contrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': rotate, + 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, + 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'ColorIncreasing': color, + 'Contrast': contrast, + 'ContrastIncreasing': contrast, + 'Brightness': brightness, + 'BrightnessIncreasing': brightness, + 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x_abs, + 'TranslateY': translate_y_abs, + 'TranslateXRel': translate_x_rel, + 'TranslateYRel': translate_y_rel, +} + + +class AugmentOp: + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = dict( + fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, + resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, + ) + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. + self.magnitude_std = self.hparams.get('magnitude_std', 0) + + def __call__(self, img): + if self.prob < 1.0 and random.random() > self.prob: + return img + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() + return self.aug_fn(img, *level_args, **self.kwargs) + + +def auto_augment_policy_v0(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. 
+ policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_v0r(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_original(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 + policy = [ + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + 
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_originalr(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation + policy = [ + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy(name='v0', hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + if name == 'original': + return auto_augment_policy_original(hparams) + elif name == 'originalr': + return auto_augment_policy_originalr(hparams) + elif name == 'v0': + return auto_augment_policy_v0(hparams) + elif name == 'v0r': + return auto_augment_policy_v0r(hparams) + else: + assert False, 'Unknown AA policy (%s)' % name + + +class AutoAugment: + + def __init__(self, policy): + self.policy = policy + + def __call__(self, img): + sub_policy = random.choice(self.policy) + for op in sub_policy: + img = op(img) + return img + + +def auto_augment_transform(config_str, hparams): + """ + Create a AutoAugment transform + :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). 
+ The remaining sections, not order sepecific determine + 'mstd' - float std deviation of magnitude noise applied + Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 + :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme + :return: A PyTorch compatible Transform + """ + config = config_str.split('-') + policy_name = config[0] + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + else: + assert False, 'Unknown AutoAugment config section' + aa_policy = auto_augment_policy(policy_name, hparams=hparams) + return AutoAugment(aa_policy) + + +_RAND_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'Posterize', + 'Solarize', + 'SolarizeAdd', + 'Color', + 'Contrast', + 'Brightness', + 'Sharpness', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + +_RAND_INCREASING_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'SolarizeAdd', + 'ColorIncreasing', + 'ContrastIncreasing', + 'BrightnessIncreasing', + 'SharpnessIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + 'Rotate': 0.3, + 'ShearX': 0.2, + 'ShearY': 0.2, + 'TranslateXRel': 0.1, + 'TranslateYRel': 0.1, + 'Color': .025, + 'Sharpness': 0.025, + 'AutoContrast': 0.025, + 'Solarize': .005, + 'SolarizeAdd': .005, + 'Contrast': .005, + 'Brightness': .005, + 'Equalize': .005, + 'Posterize': 0, + 'Invert': 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [AugmentOp( + name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + Create a RandAugment transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + :return: A PyTorch compatible Transform + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split('-') + assert config[0] == 'rand' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'inc': + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == 'm': + magnitude = int(val) + elif key == 'n': + num_layers = int(val) + elif key == 'w': + weight_idx = int(val) + else: + assert False, 'Unknown RandAugment config section' + ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms) + choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) + + +_AUGMIX_TRANSFORMS = [ + 'AutoContrast', + 'ColorIncreasing', # not in paper + 'ContrastIncreasing', # not in paper + 'BrightnessIncreasing', # not in paper + 'SharpnessIncreasing', # not in paper + 'Equalize', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', +] + + +def augmix_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _AUGMIX_TRANSFORMS + return [AugmentOp( + name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class AugMixAugment: + """ AugMix Transform + Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + """ + def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): + self.ops = ops + self.alpha = alpha + self.width = width + self.depth = depth + self.blended = blended # blended mode is faster but not well tested + + def _calc_blended_weights(self, ws, m): + ws = ws * m + cump = 1. + rws = [] + for w in ws[::-1]: + alpha = w / cump + cump *= (1 - alpha) + rws.append(alpha) + return np.array(rws[::-1], dtype=np.float32) + + def _apply_blended(self, img, mixing_weights, m): + # This is my first crack and implementing a slightly faster mixed augmentation. Instead + # of accumulating the mix for each chain in a Numpy array and then blending with original, + # it recomputes the blending coefficients and applies one PIL image blend per chain. + # TODO the results appear in the right ballpark but they differ by more than rounding. 
+ img_orig = img.copy() + ws = self._calc_blended_weights(mixing_weights, m) + for w in ws: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img_orig # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + img = Image.blend(img, img_aug, w) + return img + + def _apply_basic(self, img, mixing_weights, m): + # This is a literal adaptation of the paper/official implementation without normalizations and + # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the + # typical augmentation transforms, could use a GPU / Kornia implementation. + img_shape = img.size[0], img.size[1], len(img.getbands()) + mixed = np.zeros(img_shape, dtype=np.float32) + for mw in mixing_weights: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + mixed += mw * np.asarray(img_aug, dtype=np.float32) + np.clip(mixed, 0, 255., out=mixed) + mixed = Image.fromarray(mixed.astype(np.uint8)) + return Image.blend(img, mixed, m) + + def __call__(self, img): + mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) + m = np.float32(np.random.beta(self.alpha, self.alpha)) + if self.blended: + mixed = self._apply_blended(img, mixing_weights, m) + else: + mixed = self._apply_basic(img, mixing_weights, m) + return mixed + + +def augment_and_mix_transform(config_str, hparams): + """ Create AugMix PyTorch transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude (severity) of augmentation mix (default: 3) + 'w' - integer width of augmentation chain (default: 3) + 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) + 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) + 'mstd' - float std deviation of magnitude noise applied (default: 0) + Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 + :param hparams: Other hparams (kwargs) for the Augmentation transforms + :return: A PyTorch compatible Transform + """ + magnitude = 3 + width = 3 + depth = -1 + alpha = 1. 
+ blended = False + config = config_str.split('-') + assert config[0] == 'augmix' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'm': + magnitude = int(val) + elif key == 'w': + width = int(val) + elif key == 'd': + depth = int(val) + elif key == 'a': + alpha = float(val) + elif key == 'b': + blended = bool(val) + else: + assert False, 'Unknown AugMix config section' + ops = augmix_ops(magnitude=magnitude, hparams=hparams) + return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended) \ No newline at end of file diff --git a/examples/nas/cream/dataset/base_dataset.py b/examples/nas/cream/dataset/base_dataset.py new file mode 100755 index 0000000000..222caffda3 --- /dev/null +++ b/examples/nas/cream/dataset/base_dataset.py @@ -0,0 +1,197 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch.utils.data as data + +import os +import re +import torch +import tarfile +from PIL import Image + + +IMG_EXTENSIONS = ['.png', '.jpg', '.jpeg'] + + +def natural_key(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): + labels = [] + filenames = [] + for root, subdirs, files in os.walk(folder, topdown=False): + rel_path = os.path.relpath(root, folder) if (root != folder) else '' + label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') + for f in files: + base, ext = os.path.splitext(f) + if ext.lower() in types: + filenames.append(os.path.join(root, f)) + labels.append(label) + if class_to_idx is None: + # building class index + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + images_and_targets = zip(filenames, [class_to_idx[l] for l in labels]) + if sort: + images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) + return images_and_targets, class_to_idx + + +def load_class_map(filename, root=''): + class_to_idx = {} + class_map_path = filename + if not os.path.exists(class_map_path): + class_map_path = os.path.join(root, filename) + assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename + class_map_ext = os.path.splitext(filename)[-1].lower() + if class_map_ext == '.txt': + with open(class_map_path) as f: + class_to_idx = {v.strip(): k for k, v in enumerate(f)} + else: + assert False, 'Unsupported class map extension' + return class_to_idx + + +class Dataset(data.Dataset): + + def __init__( + self, + root, + load_bytes=False, + transform=None, + class_map=''): + + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + images, class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) + if len(images) == 0: + raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n" + "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) + self.root = root + self.samples = images + self.imgs = self.samples # torchvision ImageFolder compat + self.class_to_idx = class_to_idx + self.load_bytes = load_bytes + self.transform = transform + 
+ def __getitem__(self, index): + path, target = self.samples[index] + img = open(path, 'rb').read() if self.load_bytes else Image.open(path).convert('RGB') + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.zeros(1).long() + return img, target + + def __len__(self): + return len(self.imgs) + + def filenames(self, indices=[], basename=False): + if indices: + if basename: + return [os.path.basename(self.samples[i][0]) for i in indices] + else: + return [self.samples[i][0] for i in indices] + else: + if basename: + return [os.path.basename(x[0]) for x in self.samples] + else: + return [x[0] for x in self.samples] + + +def _extract_tar_info(tarfile, class_to_idx=None, sort=True): + files = [] + labels = [] + for ti in tarfile.getmembers(): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + label = os.path.basename(dirname) + ext = os.path.splitext(basename)[1] + if ext.lower() in IMG_EXTENSIONS: + files.append(ti) + labels.append(label) + if class_to_idx is None: + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + tarinfo_and_targets = zip(files, [class_to_idx[l] for l in labels]) + if sort: + tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) + return tarinfo_and_targets, class_to_idx + + +class DatasetTar(data.Dataset): + + def __init__(self, root, load_bytes=False, transform=None, class_map=''): + + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + assert os.path.isfile(root) + self.root = root + with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later + self.samples, self.class_to_idx = _extract_tar_info(tf, class_to_idx) + self.tarfile = None # lazy init in __getitem__ + self.load_bytes = load_bytes + self.transform = transform + + def __getitem__(self, index): + if self.tarfile is None: + self.tarfile = tarfile.open(self.root) + tarinfo, target = self.samples[index] + iob = self.tarfile.extractfile(tarinfo) + img = iob.read() if self.load_bytes else Image.open(iob).convert('RGB') + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.zeros(1).long() + return img, target + + def __len__(self): + return len(self.samples) + + +class AugMixDataset(torch.utils.data.Dataset): + """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" + + def __init__(self, dataset, num_splits=2): + self.augmentation = None + self.normalize = None + self.dataset = dataset + if self.dataset.transform is not None: + self._set_transforms(self.dataset.transform) + self.num_splits = num_splits + + def _set_transforms(self, x): + assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' + self.dataset.transform = x[0] + self.augmentation = x[1] + self.normalize = x[2] + + @property + def transform(self): + return self.dataset.transform + + @transform.setter + def transform(self, x): + self._set_transforms(x) + + def _normalize(self, x): + return x if self.normalize is None else self.normalize(x) + + def __getitem__(self, i): + x, y = self.dataset[i] # all splits share the same dataset base transform + x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) + # run the full augmentation on the remaining splits + for _ in range(self.num_splits - 1): + x_list.append(self._normalize(self.augmentation(x))) + 
return tuple(x_list), y + + def __len__(self): + return len(self.dataset) \ No newline at end of file diff --git a/examples/nas/cream/dataset/loader.py b/examples/nas/cream/dataset/loader.py new file mode 100755 index 0000000000..fa6199b200 --- /dev/null +++ b/examples/nas/cream/dataset/loader.py @@ -0,0 +1,240 @@ +import torch.utils.data +import numpy as np + +from dataset.transform import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, create_transform +from dataset.utils import RandomErasing + +import math +import torch +from torch.utils.data import Sampler +import torch.distributed as dist + + +class OrderedDistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + +def fast_collate(batch): + """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" + assert isinstance(batch[0], tuple) + batch_size = len(batch) + if isinstance(batch[0][0], tuple): + # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position + # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position + inner_tuple_size = len(batch[0][0]) + flattened_batch_size = batch_size * inner_tuple_size + targets = torch.zeros(flattened_batch_size, dtype=torch.int64) + tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length + for j in range(inner_tuple_size): + targets[i + j * batch_size] = batch[i][1] + tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) + return tensor, targets + elif isinstance(batch[0][0], np.ndarray): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i] += torch.from_numpy(batch[i][0]) + return tensor, 
targets + elif isinstance(batch[0][0], torch.Tensor): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i].copy_(batch[i][0]) + return tensor, targets + else: + assert False + + +class PrefetchLoader: + + def __init__(self, + loader, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + fp16=False, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0): + self.loader = loader + self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1) + self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1) + self.fp16 = fp16 + if fp16: + self.mean = self.mean.half() + self.std = self.std.half() + if re_prob > 0.: + self.random_erasing = RandomErasing( + probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits) + else: + self.random_erasing = None + + def __iter__(self): + stream = torch.cuda.Stream() + first = True + + for next_input, next_target in self.loader: + with torch.cuda.stream(stream): + next_input = next_input.cuda(non_blocking=True) + next_target = next_target.cuda(non_blocking=True) + if self.fp16: + next_input = next_input.half().sub_(self.mean).div_(self.std) + else: + next_input = next_input.float().sub_(self.mean).div_(self.std) + if self.random_erasing is not None: + next_input = self.random_erasing(next_input) + + if not first: + yield input, target + else: + first = False + + torch.cuda.current_stream().wait_stream(stream) + input = next_input + target = next_target + + yield input, target + + def __len__(self): + return len(self.loader) + + @property + def sampler(self): + return self.loader.sampler + + @property + def dataset(self): + return self.loader.dataset + + +def create_loader( + dataset, + input_size, + batch_size, + is_training=False, + use_prefetcher=True, + re_prob=0., + re_mode='const', + re_count=1, + re_split=False, + color_jitter=0.4, + auto_augment=None, + num_aug_splits=0, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=1, + distributed=False, + crop_pct=None, + collate_fn=None, + pin_memory=False, + fp16=False, + tf_preprocessing=False, + use_multi_epochs_loader=False +): + re_num_splits = 0 + if re_split: + # apply RE to second half of batch if no aug split otherwise line up with aug split + re_num_splits = num_aug_splits or 2 + dataset.transform = create_transform( + input_size, + is_training=is_training, + use_prefetcher=use_prefetcher, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + mean=mean, + std=std, + crop_pct=crop_pct, + tf_preprocessing=tf_preprocessing, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=num_aug_splits > 0, + ) + + sampler = None + if distributed: + if is_training: + sampler = torch.utils.data.distributed.DistributedSampler(dataset) + else: + # This will add extra duplicate entries to result in equal num + # of samples per-process, will slightly alter validation results + sampler = OrderedDistributedSampler(dataset) + + if collate_fn is None: + collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate + + loader_class = torch.utils.data.DataLoader + + loader = loader_class( + dataset, + batch_size=batch_size, + shuffle=sampler is None and is_training, + num_workers=num_workers, + sampler=sampler, + collate_fn=collate_fn, + 
pin_memory=pin_memory, + drop_last=is_training, + ) + if use_prefetcher: + loader = PrefetchLoader( + loader, + mean=mean, + std=std, + fp16=fp16, + re_prob=re_prob if is_training else 0., + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits + ) + + return loader diff --git a/examples/nas/cream/dataset/processing.py b/examples/nas/cream/dataset/processing.py new file mode 100755 index 0000000000..a6f3dbf219 --- /dev/null +++ b/examples/nas/cream/dataset/processing.py @@ -0,0 +1,204 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +import numpy as np + +IMAGE_SIZE = 224 +CROP_PADDING = 32 + + +def distorted_bounding_box_crop(image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using one of the bboxes randomly distorted. + See `tf.image.sample_distorted_bounding_box` for more documentation. + Args: + image_bytes: `Tensor` of binary image data. + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + cropped image `Tensor` + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): + shape = tf.image.extract_jpeg_shape(image_bytes) + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + shape, + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + offset_y, offset_x, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + + return image + + +def _at_least_x_are_equal(a, b, x): + """At least `x` of `a` and `b` `Tensors` are equal.""" + match = tf.equal(a, b) + match = tf.cast(match, tf.int32) + return tf.greater_equal(tf.reduce_sum(match), x) + + +def _decode_and_random_crop(image_bytes, image_size, resize_method): + """Make a random crop of image_size.""" + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + image = distorted_bounding_box_crop( + image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(3. / 4, 4. 
/ 3.), + area_range=(0.08, 1.0), + max_attempts=10, + scope=None) + original_shape = tf.image.extract_jpeg_shape(image_bytes) + bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) + + image = tf.cond( + bad, + lambda: _decode_and_center_crop(image_bytes, image_size), + lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0]) + + return image + + +def _decode_and_center_crop(image_bytes, image_size, resize_method): + """Crops to center of image with padding then scales image_size.""" + shape = tf.image.extract_jpeg_shape(image_bytes) + image_height = shape[0] + image_width = shape[1] + + padded_center_crop_size = tf.cast( + ((image_size / (image_size + CROP_PADDING)) * + tf.cast(tf.minimum(image_height, image_width), tf.float32)), + tf.int32) + + offset_height = ((image_height - padded_center_crop_size) + 1) // 2 + offset_width = ((image_width - padded_center_crop_size) + 1) // 2 + crop_window = tf.stack([offset_height, offset_width, + padded_center_crop_size, padded_center_crop_size]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + image = tf.image.resize([image], [image_size, image_size], resize_method)[0] + + return image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_random_crop(image_bytes, image_size, resize_method) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_center_crop(image_bytes, image_size, resize_method) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_image(image_bytes, + is_training=False, + use_bfloat16=False, + image_size=IMAGE_SIZE, + interpolation='bicubic'): + """Preprocesses the given image. + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + is_training: `bool` for whether the preprocessing is for training. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + Returns: + A preprocessed image `Tensor` with value range of [0, 255]. 
+ """ + if is_training: + return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) + else: + return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) + + +class TfPreprocessTransform: + + def __init__(self, is_training=False, size=224, interpolation='bicubic'): + self.is_training = is_training + self.size = size[0] if isinstance(size, tuple) else size + self.interpolation = interpolation + self._image_bytes = None + self.process_image = self._build_tf_graph() + self.sess = None + + def _build_tf_graph(self): + with tf.device('/cpu:0'): + self._image_bytes = tf.placeholder( + shape=[], + dtype=tf.string, + ) + img = preprocess_image( + self._image_bytes, self.is_training, False, self.size, self.interpolation) + return img + + def __call__(self, image_bytes): + if self.sess is None: + self.sess = tf.Session() + img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) + img = img.round().clip(0, 255).astype(np.uint8) + if img.ndim < 3: + img = np.expand_dims(img, axis=-1) + img = np.rollaxis(img, 2) # HWC to CHW + return img \ No newline at end of file diff --git a/examples/nas/cream/dataset/tiny_imagenet.py b/examples/nas/cream/dataset/tiny_imagenet.py new file mode 100755 index 0000000000..cedafe4810 --- /dev/null +++ b/examples/nas/cream/dataset/tiny_imagenet.py @@ -0,0 +1,166 @@ +from __future__ import print_function +import os +import os.path +import errno +import torch +import numpy as np +import sys +import cv2 +from PIL import Image + +import torch.utils.data as data +import torchvision.transforms as transforms +from torchvision.datasets.utils import download_url, check_integrity + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def find_classes(class_file): + with open(class_file) as r: + classes = map(lambda s: s.strip(), r.readlines()) + + # classes.sort() + # class_to_idx = {classes[i]: i for i in range(len(classes))} + + class_to_idx = {iclass: i for i, iclass in enumerate(classes)} + + return classes, class_to_idx + + +def loadPILImage(path): + trans_img = Image.open(path).convert('RGB') + return trans_img + +def loadCVImage(path): + img = cv2.imread(path, cv2.IMREAD_COLOR) + trans_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + return Image.fromarray(trans_img.astype('uint8'), 'RGB') + +def make_dataset(root, base_folder, dirname, class_to_idx): + images = [] + dir_path = os.path.join(root, base_folder) + + if dirname == 'train': + for fname in sorted(os.listdir(dir_path)): + cls_fpath = os.path.join(dir_path, fname) + if os.path.isdir(cls_fpath): + imgfnames = sorted(os.listdir(cls_fpath))[:250] + for imgname in imgfnames: + if is_image_file(imgname): + path = os.path.join(cls_fpath, imgname) + item = (path, class_to_idx[fname]) + images.append(item) + elif dirname == 'val': + for fname in sorted(os.listdir(dir_path)): + cls_fpath = os.path.join(dir_path, fname) + if os.path.isdir(cls_fpath): + imgfnames = sorted(os.listdir(cls_fpath))[250:350] + for imgname in imgfnames: + if is_image_file(imgname): + path = os.path.join(cls_fpath, imgname) + item = (path, class_to_idx[fname]) + images.append(item) + + return images + +class NewImageNet(data.Dataset): + + base_folder = 'new_dataset' + def __init__(self, root, train=True, + target_transform=None, + test=False, loader='opencv'): + self.root = os.path.expanduser(root) + if train: + 
self.transform = transforms.Compose([ + # transforms.RandomCrop(64, padding=4), + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + else: + self.transform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + self.target_transform = target_transform + self.train = train # training set or test set + self.loader = loader + + _, class_to_idx = find_classes(os.path.join(self.root, self.base_folder, 'info.txt')) + # self.classes = classes + + if self.train: + dirname = 'train' + else: + dirname = 'val' + + self.class_to_idx = class_to_idx + self.idx_to_class = dict() + for idx, key in enumerate(class_to_idx.keys()): + self.idx_to_class[idx] = key + + self.data_info = make_dataset(self.root, self.base_folder, dirname, class_to_idx) + + if len(self.data_info) == 0: + raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n" + "Supported image extensions are: " + ",".join( + IMG_EXTENSIONS))) + + def __getitem__(self, index): + """ + Args: + index (int): Index + Returns: + tuple: (img_path, target) where target is index of the target class. + """ + + img_path, target = self.data_info[index][0], self.data_info[index][1] + + if self.loader == 'pil': + img = loadPILImage(img_path) + else: + img = loadCVImage(img_path) + + if self.transform is not None: + result_img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return result_img, target + + def __len__(self): + return len(self.data_info) + + +def get_newimagenet(dir, batch_size): + train_data = NewImageNet(root=dir, train=True) + test_data = NewImageNet(root=dir, train=False) + + test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + + test_loader = torch.utils.data.DataLoader( + test_data, batch_size=batch_size, + sampler=test_sampler, + pin_memory=True, num_workers=16) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=16) + + return [train_loader, test_loader], [train_sampler, test_sampler] + + diff --git a/examples/nas/cream/dataset/transform.py b/examples/nas/cream/dataset/transform.py new file mode 100755 index 0000000000..6625143eaf --- /dev/null +++ b/examples/nas/cream/dataset/transform.py @@ -0,0 +1,182 @@ +import math + +import torch +from torchvision import transforms + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + +from dataset.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform +from dataset.utils import RandomErasing, _pil_interp, RandomResizedCropAndInterpolation, ToNumpy + +def transforms_imagenet_train( + img_size=224, + scale=(0.08, 1.0), + color_jitter=0.4, + auto_augment=None, + interpolation='random', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of 
the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + primary_tfl = [ + RandomResizedCropAndInterpolation( + img_size, scale=scale, interpolation=interpolation), + transforms.RandomHorizontalFlip() + ] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = _pil_interp(interpolation) + if auto_augment.startswith('rand'): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] + else: + secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + final_tfl += [ToNumpy()] + else: + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + if re_prob > 0.: + final_tfl.append( + RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) + + if separate: + return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + + +def transforms_imagenet_eval( + img_size=224, + crop_pct=None, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD): + crop_pct = crop_pct or DEFAULT_CROP_PCT + + if isinstance(img_size, tuple): + assert len(img_size) == 2 + if img_size[-1] == img_size[-2]: + # fall-back to older behaviour so Resize scales to shortest edge if target is square + scale_size = int(math.floor(img_size[0] / crop_pct)) + else: + scale_size = tuple([int(x / crop_pct) for x in img_size]) + else: + scale_size = int(math.floor(img_size / crop_pct)) + + tfl = [ + transforms.Resize(scale_size, _pil_interp(interpolation)), + transforms.CenterCrop(img_size), + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + + return transforms.Compose(tfl) + + +def create_transform( + input_size, + is_training=False, + use_prefetcher=False, + color_jitter=0.4, + auto_augment=None, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + crop_pct=None, + tf_preprocessing=False, + separate=False): + + if isinstance(input_size, tuple): + img_size = input_size[-2:] + else: + img_size = input_size + + if tf_preprocessing and use_prefetcher: + 
assert not separate, "Separate transforms not supported for TF preprocessing" + from lib.dataset.processing import TfPreprocessTransform + transform = TfPreprocessTransform( + is_training=is_training, size=img_size, interpolation=interpolation) + else: + if is_training: + transform = transforms_imagenet_train( + img_size, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=separate) + else: + assert not separate, "Separate transforms not supported for validation preprocessing" + transform = transforms_imagenet_eval( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + crop_pct=crop_pct) + + return transform \ No newline at end of file diff --git a/examples/nas/cream/dataset/utils.py b/examples/nas/cream/dataset/utils.py new file mode 100755 index 0000000000..d1fc97519e --- /dev/null +++ b/examples/nas/cream/dataset/utils.py @@ -0,0 +1,303 @@ +import torch +import torchvision.transforms.functional as F +from PIL import Image +import warnings +import math +import random +import numpy as np + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +_pil_interpolation_to_str = { + Image.NEAREST: 'PIL.Image.NEAREST', + Image.BILINEAR: 'PIL.Image.BILINEAR', + Image.BICUBIC: 'PIL.Image.BICUBIC', + Image.LANCZOS: 'PIL.Image.LANCZOS', + Image.HAMMING: 'PIL.Image.HAMMING', + Image.BOX: 'PIL.Image.BOX', +} + + +def _pil_interp(method): + if method == 'bicubic': + return Image.BICUBIC + elif method == 'lanczos': + return Image.LANCZOS + elif method == 'hamming': + return Image.HAMMING + else: + # default bilinear, do we want to allow nearest? + return Image.BILINEAR + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.), + interpolation='bilinear'): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + Returns: + PIL Image: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + +class RandomErasing: + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. 
+ See https://arxiv.org/pdf/1708.04896.pdf + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. + """ + + def __init__( + self, + probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if mode == 'rand': + self.rand_color = True # per block random normal + elif mode == 'pixel': + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == 'const' + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype, device=self.device) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input + +def resolve_data_config(args, default_cfg={}, model=None, verbose=True): + DEFAULT_CROP_PCT = 0.875 + IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) + IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + new_config = {} + default_cfg = default_cfg + if not default_cfg and model is not None and hasattr(model, 'default_cfg'): + default_cfg = model.default_cfg + + # Resolve input/image size + in_chans = 3 + if 'chans' in args and args['chans'] is not None: + in_chans = args['chans'] + + input_size = (in_chans, 224, 224) + if 'input_size' in args and args['input_size'] is not None: + assert isinstance(args['input_size'], (tuple, list)) + assert 
len(args['input_size']) == 3 + input_size = tuple(args['input_size']) + in_chans = input_size[0] # input_size overrides in_chans + elif 'img_size' in args and args['img_size'] is not None: + assert isinstance(args['img_size'], int) + input_size = (in_chans, args['img_size'], args['img_size']) + elif 'input_size' in default_cfg: + input_size = default_cfg['input_size'] + new_config['input_size'] = input_size + + # resolve interpolation method + new_config['interpolation'] = 'bicubic' + if 'interpolation' in args and args['interpolation']: + new_config['interpolation'] = args['interpolation'] + elif 'interpolation' in default_cfg: + new_config['interpolation'] = default_cfg['interpolation'] + + # resolve dataset + model mean for normalization + new_config['mean'] = IMAGENET_DEFAULT_MEAN + if 'mean' in args and args['mean'] is not None: + mean = tuple(args['mean']) + if len(mean) == 1: + mean = tuple(list(mean) * in_chans) + else: + assert len(mean) == in_chans + new_config['mean'] = mean + elif 'mean' in default_cfg: + new_config['mean'] = default_cfg['mean'] + + # resolve dataset + model std deviation for normalization + new_config['std'] = IMAGENET_DEFAULT_STD + if 'std' in args and args['std'] is not None: + std = tuple(args['std']) + if len(std) == 1: + std = tuple(list(std) * in_chans) + else: + assert len(std) == in_chans + new_config['std'] = std + elif 'std' in default_cfg: + new_config['std'] = default_cfg['std'] + + # resolve default crop percentage + new_config['crop_pct'] = DEFAULT_CROP_PCT + if 'crop_pct' in args and args['crop_pct'] is not None: + new_config['crop_pct'] = args['crop_pct'] + elif 'crop_pct' in default_cfg: + new_config['crop_pct'] = default_cfg['crop_pct'] + + return new_config \ No newline at end of file diff --git a/examples/nas/cream/distributed_test.sh b/examples/nas/cream/distributed_test.sh new file mode 100755 index 0000000000..50a2ad7bb6 --- /dev/null +++ b/examples/nas/cream/distributed_test.sh @@ -0,0 +1,4 @@ +#!/bin/bash +NUM_PROC=$1 +shift +python -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/test.py "$@" diff --git a/examples/nas/cream/distributed_train.sh b/examples/nas/cream/distributed_train.sh new file mode 100755 index 0000000000..6547b08537 --- /dev/null +++ b/examples/nas/cream/distributed_train.sh @@ -0,0 +1,4 @@ +#!/bin/bash +NUM_PROC=$1 +shift +python -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/supernet.py "$@" diff --git a/examples/nas/cream/models/__init__.py b/examples/nas/cream/models/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/nas/cream/models/builder.py b/examples/nas/cream/models/builder.py new file mode 100755 index 0000000000..013d5dc42b --- /dev/null +++ b/examples/nas/cream/models/builder.py @@ -0,0 +1,392 @@ +import torch +import logging +import math +import re +from collections.__init__ import OrderedDict +from copy import deepcopy +import torch.nn as nn + +from models.utils import * +from models.units import * +from models.utils import _parse_ksize + +def _decode_block_str(block_str): + """ Decode block definition string + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. 
+ leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. + Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = nn.ReLU + elif v == 'r6': + value = nn.ReLU6 + elif v == 'sw': + value = Swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. 
This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + +class ChildNetBuilder: + """ Build Trunk Blocks + """ + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', + verbose=False): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + + # state updated during build, consumed by model + self.in_chs = None + self.features = OrderedDict() + + def _round_channels(self, chs): + return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to 
work around mismatch in origin impl input filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + logging.info('Building model trunk with %d stages...' % len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + logging.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + extract_features = '' # No features extracted + if self.verbose: + logging.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + do_extract = False + if self.feature_location == 'pre_pwl': + if last_block: + next_stage_idx = stage_idx + 1 + if next_stage_idx >= len(model_block_args): + do_extract = True + else: + do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 + elif self.feature_location == 'post_exp': + if block_args['stride'] > 1 or (last_stack and last_block) : + do_extract = True + if do_extract: + extract_features = self.feature_location + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + logging.info(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != 
current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_module = block.feature_module(extract_features) + if feature_module: + feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module + feature_channels = block.feature_channels(extract_features) + self.features[feature_idx] = dict( + name=feature_module, + num_chs=feature_channels + ) + feature_idx += 1 + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + +def _init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None): + """ Weight initialization as per Tensorflow official implementations. + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + if n in last_bn: + m.weight.data.zero_() + m.bias.data.zero_() + else: + m.weight.data.fill_(1.0) + m.bias.data.zero_() + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False): + last_bn = [] + if zero_gamma: + prev_n = '' + for n, m in model.named_modules(): + if isinstance(m, nn.BatchNorm2d): + if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]): + last_bn.append(prev_n) + prev_n = n + last_bn.append(prev_n) + + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n, last_bn=last_bn) \ No newline at end of file diff --git a/examples/nas/cream/models/hbuilder.py b/examples/nas/cream/models/hbuilder.py new file mode 100755 index 0000000000..a09c344ad7 --- /dev/null +++ b/examples/nas/cream/models/hbuilder.py @@ -0,0 +1,417 @@ +import torch +import logging +import math +import re +from collections.__init__ import OrderedDict +from copy import deepcopy +import torch.nn as nn + +from models.utils import * +from models.units import * +from models.utils import _parse_ksize + +from nni.nas.pytorch import mutables + +def _decode_block_str(block_str): + """ Decode block definition string + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. 
ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. + Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = nn.ReLU + elif v == 'r6': + value = nn.ReLU6 + elif v == 'sw': + value = Swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + fake_in_chs=fake_in_chs, + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type 
== 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + + +class SuperNetBuilder: + """ Build Trunk Blocks + """ + def __init__(self, choices, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', + verbose=False, resunit=False, dil_conv=False): + + # dict + # choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + self.choices = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']] + self.choices_num = len(self.choices)-1 + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + 
self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + self.resunit = resunit + self.dil_conv = dil_conv + + # state updated during build, consumed by model + self.in_chs = None + + def _round_channels(self, chs): + return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba, choice_idx, block_idx, block_count, resunit=False, dil_conv=False): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + if choice_idx == self.choice_num-1: + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + logging.info('Building model trunk with %d stages...' 
% len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + logging.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + # blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + if self.verbose: + logging.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + logging.info(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + + if stage_idx==0 or stage_idx==6: + self.choice_num = 1 + else: + self.choice_num = len(self.choices) + + if self.dil_conv: + self.choice_num += 2 + + choice_blocks = [] + block_args_copy = deepcopy(block_args) + if self.choice_num == 1: + # create the block + block = self._make_block(block_args, 0, total_block_idx, total_block_count) + choice_blocks.append(block) + else: + for choice_idx, choice in enumerate(self.choices): + # create the block + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, choice[0], choice[1]) + block = self._make_block(block_args, choice_idx, total_block_idx, total_block_count) + choice_blocks.append(block) + if self.dil_conv: + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 3, 0) + block = self._make_block(block_args, self.choice_num - 2, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) + choice_blocks.append(block) + + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 5, 0) + block = self._make_block(block_args, self.choice_num - 1, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) + choice_blocks.append(block) + + if self.resunit: + from models.resunit import get_Bottleneck + block = get_Bottleneck(block.conv_pw.in_channels, + block.conv_pwl.out_channels, + block.conv_dw.stride[0]) + choice_blocks.append(block) + + choice_block = mutables.LayerChoice(choice_blocks) + stages.append(choice_block) + # create the block + # block = self._make_block(block_args, total_block_idx, total_block_count) + total_block_idx += 1 # incr global block idx (across all stacks) + + # stages.append(blocks) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
+ Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) diff --git a/examples/nas/cream/models/hypernet.py b/examples/nas/cream/models/hypernet.py new file mode 100755 index 0000000000..1e47775a04 --- /dev/null +++ b/examples/nas/cream/models/hypernet.py @@ -0,0 +1,307 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F + +from nni.nas.pytorch import mutables +from models.hbuilder import * + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + +_DEBUG = False + + +class SuperNet(nn.Module): + + def __init__(self, block_args, choices, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, + head_bias=True, + channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., slice=4, + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', resunit=False, + dil_conv=False): + super(SuperNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = SuperNetBuilder( + choices, channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG, resunit=resunit, dil_conv=dil_conv) + # self.blocks = nn.ModuleList(*builder(self._in_chs, block_args)) + blocks = builder(self._in_chs, block_args) + self.blocks = 
nn.Sequential(*blocks) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + self.meta_layer = nn.Linear(self.num_classes * slice, 1) + efficientnet_init_weights(self) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None + + def forward_features(self, x, cand): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if cand is not None: + pass # x = self.blocks(x) + else: + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x, cand=None): + x = self.forward_features(x, cand) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + def forward_meta(self, features): + return self.meta_layer(features.view(1, -1)) + + def rand_parameters(self, architecture, meta=False): + for name, param in self.named_parameters(recurse=True): + if 'meta' in name and meta: + yield param + elif 'blocks' not in name and 'meta' not in name and (not meta): + yield param + + if not meta: + for layer, layer_arch in zip(self.blocks, architecture.keys()): + for choice_idx, choice in enumerate(architecture[layer_arch]): + if choice: + for name, param in layer[choice_idx].named_parameters(recurse=True): + yield param + + +def search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum): + sta_num = [1, 1, 1, 1, 1] + order = [2, 3, 4, 1, 0, 2, 3, 4, 1, 0] + limits = [3, 3, 3, 2, 2, 4, 4, 4, 4, 4] + size_factor = 7 + base_min_flops = sum([flops_op_dict[i][0][0] for i in range(5)]) + base_max_flops = sum([flops_op_dict[i][5][0] for i in range(5)]) + + + if base_min_flops > flops_maximum: + while base_min_flops > flops_maximum and size_factor >= 2: + size_factor = size_factor - 1 + flops_minimum = flops_minimum * (7. / size_factor) + flops_maximum = flops_maximum * (7. 
/ size_factor) + if size_factor < 2: + return None, None, None + elif base_max_flops < flops_minimum: + cur_ptr = 0 + while base_max_flops < flops_minimum and cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] + sta_num[order[cur_ptr]] += 1 + if cur_ptr > 7 and base_max_flops < flops_minimum: + return None, None, None + + cur_ptr = 0 + while cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] + if base_max_flops <= flops_maximum: + sta_num[order[cur_ptr]] += 1 + else: + break + + arch_def = [item[:i] for i, item in zip([1]+sta_num+[1], arch_def)] + # print(arch_def) + + return sta_num, arch_def, size_factor + +def _gen_supernet(flops_minimum=0, flops_maximum=600, **kwargs): + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', + 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + + flops_op_dict = {} + for i in range(5): + flops_op_dict[i] = {} + flops_op_dict[0][0] = (21.828704, 18.820752) + flops_op_dict[0][1] = (32.669328, 28.16048) + flops_op_dict[0][2] = (25.039968, 23.637648) + flops_op_dict[0][3] = (37.486224, 35.385824) + flops_op_dict[0][4] = (29.856864, 30.862992) + flops_op_dict[0][5] = (44.711568, 46.22384) + flops_op_dict[1][0] = (11.808656, 11.86712) + flops_op_dict[1][1] = (17.68624, 17.780848) + flops_op_dict[1][2] = (13.01288, 13.87416) + flops_op_dict[1][3] = (19.492576, 20.791408) + flops_op_dict[1][4] = (14.819216, 16.88472) + flops_op_dict[1][5] = (22.20208, 25.307248) + flops_op_dict[2][0] = (8.198, 10.99632) + flops_op_dict[2][1] = (12.292848, 16.5172) + flops_op_dict[2][2] = (8.69976, 11.99984) + flops_op_dict[2][3] = (13.045488, 18.02248) + flops_op_dict[2][4] = (9.4524, 13.50512) + flops_op_dict[2][5] = (14.174448, 20.2804) + flops_op_dict[3][0] = (12.006112, 15.61632) + flops_op_dict[3][1] = (18.028752, 23.46096) + flops_op_dict[3][2] = (13.009632, 16.820544) + flops_op_dict[3][3] = (19.534032, 25.267296) + flops_op_dict[3][4] = (14.514912, 18.62688) + flops_op_dict[3][5] = (21.791952, 27.9768) + flops_op_dict[4][0] = (11.307456, 15.292416) + flops_op_dict[4][1] = (17.007072, 23.1504) + flops_op_dict[4][2] = (11.608512, 15.894528) + flops_op_dict[4][3] = (17.458656, 24.053568) + flops_op_dict[4][4] = (12.060096, 16.797696) + flops_op_dict[4][5] = (18.136032, 25.40832) + + sta_num, arch_def, size_factor = search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum) + + if sta_num is None or arch_def is None or size_factor is None: + raise ValueError('Invalid FLOPs Settings') + + 
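+    # search_for_layer greedily grows the number of blocks per searchable stage: it starts from one block per
+    # stage, shrinks the input-size factor if even that minimal network exceeds flops_maximum, then keeps
+    # adding blocks in the fixed `order` (bounded by `limits`) while the estimated FLOPs stay within budget.
+    # Illustrative example (values depend on the lookup table above): with the 0-600 MFLOPs budget used in
+    # run.sh this typically saturates every searchable stage at its limit, i.e. sta_num = [4, 4, 4, 4, 4].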
model_kwargs = dict( + block_args=decode_arch_def(arch_def), + choices=choices, + num_features=num_features, + stem_size=16, + # channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), + **kwargs, + ) + model = SuperNet(**model_kwargs) + return model, sta_num, size_factor + + +class Classifier(nn.Module): + def __init__(self, num_classes=1000): + super(Classifier, self).__init__() + self.classifier = nn.Linear(num_classes, num_classes) + + def forward(self, x): + return self.classifier(x) + + +if __name__ == '__main__': + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', + 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + + flops_op_dict = {} + for i in range(5): + flops_op_dict[i] = {} + flops_op_dict[0][0] = (21.828704, 18.820752) + flops_op_dict[0][1] = (32.669328, 28.16048) + flops_op_dict[0][2] = (25.039968, 23.637648) + flops_op_dict[0][3] = (37.486224, 35.385824) + flops_op_dict[0][4] = (29.856864, 30.862992) + flops_op_dict[0][5] = (44.711568, 46.22384) + flops_op_dict[1][0] = (11.808656, 11.86712) + flops_op_dict[1][1] = (17.68624, 17.780848) + flops_op_dict[1][2] = (13.01288, 13.87416) + flops_op_dict[1][3] = (19.492576, 20.791408) + flops_op_dict[1][4] = (14.819216, 16.88472) + flops_op_dict[1][5] = (22.20208, 25.307248) + flops_op_dict[2][0] = (8.198, 10.99632) + flops_op_dict[2][1] = (12.292848, 16.5172) + flops_op_dict[2][2] = (8.69976, 11.99984) + flops_op_dict[2][3] = (13.045488, 18.02248) + flops_op_dict[2][4] = (9.4524, 13.50512) + flops_op_dict[2][5] = (14.174448, 20.2804) + flops_op_dict[3][0] = (12.006112, 15.61632) + flops_op_dict[3][1] = (18.028752, 23.46096) + flops_op_dict[3][2] = (13.009632, 16.820544) + flops_op_dict[3][3] = (19.534032, 25.267296) + flops_op_dict[3][4] = (14.514912, 18.62688) + flops_op_dict[3][5] = (21.791952, 27.9768) + flops_op_dict[4][0] = (11.307456, 15.292416) + flops_op_dict[4][1] = (17.007072, 23.1504) + flops_op_dict[4][2] = (11.608512, 15.894528) + flops_op_dict[4][3] = (17.458656, 24.053568) + flops_op_dict[4][4] = (12.060096, 16.797696) + flops_op_dict[4][5] = (18.136032, 25.40832) + + sta_num, arch_def, size_factor = search_for_layer(flops_op_dict, arch_def, 0, 20) + print(sta_num, size_factor) diff --git a/examples/nas/cream/models/model.py b/examples/nas/cream/models/model.py new file mode 100755 index 0000000000..b665e01545 --- /dev/null +++ b/examples/nas/cream/models/model.py @@ -0,0 +1,159 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F + +from nni.nas.pytorch import mutables + +from models.builder import * + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + +def 
_cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +_DEBUG = False + + +class ChildNet(nn.Module): + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False): + super(ChildNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.pool_bn = pool_bn + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = ChildNetBuilder( + channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + # self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + if pool_bn: + self.pool_bn = nn.BatchNorm1d(1) + + efficientnet_init_weights(self, zero_gamma=zero_gamma) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None + + def forward_features(self, x): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + if self.pool_bn: + x = torch.unsqueeze(x, 1) + x = self.pool_bn(x) + x = torch.squeeze(x) + return x + + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + + +def _gen_childnet(arch_list, arch_def, **kwargs): + # arch_list = [[0], [], [], [], [], [0]] + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + choices_list = [[x,y] for x in choices['kernel_size'] for y in 
choices['exp_ratio']] + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + + new_arch = [] + # change to child arch_def + for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): + if len(layer_arch) == 1: + new_arch.append(layer_arch) + continue + else: + new_layer = [] + for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)): + kernel_size, exp_ratio = choices_list[block_choice] + elements = block_arch.split('_') + block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size))) + block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio))) + new_layer.append(block_arch) + new_arch.append(new_layer) + + model_kwargs = dict( + block_args=decode_arch_def(new_arch), + num_features=num_features, + stem_size=16, + # channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), + **kwargs, + ) + model = ChildNet(**model_kwargs) + return model + +# arch_list = [[0], [3, 2, 3, 3], [3, 2, 3, 1], [3, 0, 3, 2], [3, 3, 3, 3], [3, 3, 3, 3], [0]] +# model = _gen_childnet(arch_list, zero_gamma=True) + + diff --git a/examples/nas/cream/models/resunit.py b/examples/nas/cream/models/resunit.py new file mode 100755 index 0000000000..ede5940224 --- /dev/null +++ b/examples/nas/cream/models/resunit.py @@ -0,0 +1,92 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=True) + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + + def __init__(self, inplanes, planes, stride=1, expansion=4): + super(Bottleneck, self).__init__() + planes = int(planes / expansion) + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=True) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * expansion, kernel_size=1, bias=True) + self.bn3 = nn.BatchNorm2d(planes * expansion) + self.relu = nn.ReLU(inplace=True) + self.stride = stride + self.expansion = expansion + if inplanes != planes * self.expansion: + self.downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * self.expansion, + kernel_size=1, stride=stride, bias=True), + nn.BatchNorm2d(planes * self.expansion), + ) + else: + self.downsample = None + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual 
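+        # post-activation residual: ReLU is applied after the identity/downsample branch has been added back,
+        # following the original ResNet formulation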
+ out = self.relu(out) + + return out + +def get_Bottleneck(in_c, out_c, stride): + return Bottleneck(in_c, out_c, stride=stride) + +def get_BasicBlock(in_c, out_c, stride): + return BasicBlock(in_c, out_c, stride=stride) \ No newline at end of file diff --git a/examples/nas/cream/models/units.py b/examples/nas/cream/models/units.py new file mode 100755 index 0000000000..531fcc7f2d --- /dev/null +++ b/examples/nas/cream/models/units.py @@ -0,0 +1,355 @@ +import torch +import torch.nn as nn +import numpy as np + +from functools import partial +from models.utils import * + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + +_SE_ARGS_DEFAULT = dict( + gate_fn=sigmoid, + act_layer=None, + reduce_mid=False, + divisor=1) + +def resolve_se_args(kwargs, in_chs, act_layer=None): + se_kwargs = kwargs.copy() if kwargs is not None else {} + # fill in args that aren't specified with the defaults + for k, v in _SE_ARGS_DEFAULT.items(): + se_kwargs.setdefault(k, v) + # some models, like MobilNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch + if not se_kwargs.pop('reduce_mid'): + se_kwargs['reduced_base_chs'] = in_chs + # act_layer override, if it remains None, the containing block's act_layer will be used + if se_kwargs['act_layer'] is None: + assert act_layer is not None + se_kwargs['act_layer'] = act_layer + return se_kwargs + +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + norm_kwargs = norm_kwargs or {} + has_se = se_ratio is not None and se_ratio > 0. 
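+        # a skip connection is only kept when the block preserves resolution and channel count
+        # (stride 1, in_chs == out_chs) and skipping is not explicitly disabled via noskip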
+ self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': + # no expansion in this block, use depthwise, before SE + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + if self.se is not None: + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + x += residual + return x + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = tup_pair(kernel_size) + self.stride = tup_pair(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = tup_pair(padding_val) + self.dilation = tup_pair(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, 
x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out + +class SqueezeExcite(nn.Module): + def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, + act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_): + super(SqueezeExcite, self).__init__() + self.gate_fn = gate_fn + reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) + self.act1 = act_layer(inplace=True) + self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) + + def forward(self, x): + x_se = self.avg_pool(x) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + x = x * self.gate_fn(x_se) + return x + +class ConvBnAct(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(ConvBnAct, self).__init__() + norm_kwargs = norm_kwargs or {} + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion' or location == 'depthwise': + # no expansion or depthwise this block, use act after conv + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + return x + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = 
norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + x += residual + + return x + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. 
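+# hard_sigmoid is a piecewise-linear stand-in for torch.sigmoid; illustrative values:
+#   hard_sigmoid(torch.tensor(-3.)) -> 0.0
+#   hard_sigmoid(torch.tensor(0.))  -> 0.5
+#   hard_sigmoid(torch.tensor(3.))  -> 1.0
+# It is the gate function passed to the SE modules through se_kwargs in _gen_supernet / _gen_childnet.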
+
+
+class HardSigmoid(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSigmoid, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return hard_sigmoid(x, self.inplace)
+
+class SelectAdaptivePool2d(nn.Module):
+    """Selectable global pooling layer with dynamic input kernel size
+    """
+    def __init__(self, output_size=1, pool_type='avg', flatten=False):
+        super(SelectAdaptivePool2d, self).__init__()
+        self.output_size = output_size
+        self.pool_type = pool_type
+        self.flatten = flatten
+        self.pool = nn.AdaptiveAvgPool2d(output_size)
+
+    def forward(self, x):
+        x = self.pool(x)
+        if self.flatten:
+            x = x.flatten(1)
+        return x
+
+    def feat_mult(self):
+        return 1
+
+    def __repr__(self):
+        return self.__class__.__name__ + ' (' \
+               + 'output_size=' + str(self.output_size) \
+               + ', pool_type=' + self.pool_type + ')'
\ No newline at end of file
diff --git a/examples/nas/cream/models/utils.py b/examples/nas/cream/models/utils.py
new file mode 100755
index 0000000000..0b7ad2c1d0
--- /dev/null
+++ b/examples/nas/cream/models/utils.py
@@ -0,0 +1,123 @@
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+from typing import Tuple, Optional, List
+from torch._six import container_abcs
+from itertools import repeat
+
+def _ntuple(n):
+    def parse(x):
+        if isinstance(x, container_abcs.Iterable):
+            return x
+        return tuple(repeat(x, n))
+    return parse
+tup_pair = _ntuple(2)
+
+def get_same_padding(x: int, k: int, s: int, d: int):
+    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
+
+def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
+    ih, iw = x.size()[-2:]
+    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
+    if pad_h > 0 or pad_w > 0:
+        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
+    return x
+
+def conv2d_same(
+        x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
+        padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
+    x = pad_same(x, weight.shape[-2:], stride, dilation)
+    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
+
+def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
+    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+    return padding
+
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
+    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
+
+def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
+    dynamic = False
+    if isinstance(padding, str):
+        # for any string padding, the padding will be calculated for you, one of three ways
+        padding = padding.lower()
+        if padding == 'same':
+            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
+            if is_static_pad(kernel_size, **kwargs):
+                # static case, no extra overhead
+                padding = get_padding(kernel_size, **kwargs)
+            else:
+                # dynamic 'SAME' padding, has runtime/GPU memory overhead
+                padding = 0
+                dynamic = True
+        elif padding == 'valid':
+            # 'VALID' padding, same as padding=0
+            padding = 0
+        else:
+            # Default to PyTorch style 'same'-ish symmetric padding
+            padding = get_padding(kernel_size, **kwargs)
+    return padding, dynamic
+
+def get_condconv_initializer(initializer, num_experts, expert_shape):
+    def condconv_initializer(weight):
+        """CondConv initializer function."""
+        num_params = np.prod(expert_shape)
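+        # each expert stores a flattened kernel of `num_params` values; the check below enforces the
+        # [num_experts, num_params] layout before every expert row is initialized in its original shape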
+ if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + +def make_divisible(v, divisor=8, min_value=None): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + +def resolve_bn_args(kwargs): + bn_args = {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + channels *= multiplier + return make_divisible(channels, divisor, channel_min) + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + +def create_conv2d(in_chs, out_chs, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + assert 'groups' not in kwargs # only use 'depthwise' bool arg + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + return m + diff --git a/examples/nas/cream/requirements.txt b/examples/nas/cream/requirements.txt new file mode 100755 index 0000000000..8e5a947d5b --- /dev/null +++ b/examples/nas/cream/requirements.txt @@ -0,0 +1,12 @@ +yacs +numpy==1.17 +opencv-python==4.0.1.24 +torchvision==0.2.1 +thop +git+https://github.com/sovrasov/flops-counter.pytorch.git +pillow==6.1.0 +torch==1.2 +timm==0.1.20 +tensorboardx==1.2 +git+https://github.com/Tramac/torchscope.git +tensorboard \ No newline at end of file diff --git a/examples/nas/cream/run.sh b/examples/nas/cream/run.sh new file mode 100755 index 0000000000..35c57ebe06 --- /dev/null +++ b/examples/nas/cream/run.sh @@ -0,0 +1,6 @@ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_train.sh 8 \ +--data ../NIPS20_release/data/imagenet/ --sched spos_linear \ +--pool_size 10 --meta_sta_epoch 20 --update_iter 200 \ +--epochs 120 --batch-size 128 --warmup-epochs 0 \ +--lr 0.5 --opt-eps 0.001 \ +--color-jitter 0.06 --drop 0. 
-j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 diff --git a/examples/nas/cream/supernet.py b/examples/nas/cream/supernet.py new file mode 100755 index 0000000000..47c5294289 --- /dev/null +++ b/examples/nas/cream/supernet.py @@ -0,0 +1,389 @@ +import os +import argparse +import time +import numpy as np +import logging +import torch.nn as nn +from datetime import datetime +from copy import deepcopy + +try: + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + has_apex = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + has_apex = False + +from dataset import Dataset, create_loader, resolve_data_config +from models.hypernet import _gen_supernet +from utils.flops_table import LatencyEst +from utils.helpers import * +from utils.EMA import ModelEma +from utils.saver import CheckpointSaver +from utils.loss import LabelSmoothingCrossEntropy +from utils.scheduler import create_scheduler +from torch.utils.tensorboard import SummaryWriter + +from nni.nas.pytorch.cream import CreamSupernetTrainer +from nni.nas.pytorch.cream import CreamSupernetTrainingMutator + +logger = logging.getLogger("nni.cream.supernet") + + +def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + meta_layer_no_decay = [] + meta_layer_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + if 'meta_layer' in name: + meta_layer_no_decay.append(param) + else: + no_decay.append(param) + else: + if 'meta_layer' in name: + meta_layer_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, + {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, + {'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr}, + {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr}, + ] + +def create_optimizer_supernet(args, model, filter_bias_and_bn=True): + from torch import optim as optim + opt_lower = args.opt.lower() + weight_decay = args.weight_decay + if 'adamw' in opt_lower or 'radam' in opt_lower: + # Compensate for the way current AdamW and RAdam optimizers apply LR to the weight-decay + # I don't believe they follow the paper or original Torch7 impl which schedules weight + # decay based on the ratio of current_lr/initial_lr + weight_decay /= args.lr + if weight_decay and filter_bias_and_bn: + parameters = add_weight_decay_supernet(model, args, weight_decay) + weight_decay = 0. 
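+        # the per-group dicts returned by add_weight_decay_supernet already carry their own weight_decay,
+        # so the optimizer-level value is reset to avoid applying decay twice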
+ else: + parameters = model.parameters() + + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + optimizer = optim.SGD( + parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) + elif opt_lower == 'momentum': + optimizer = optim.SGD( + parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) + elif opt_lower == 'adam': + optimizer = optim.Adam( + parameters, weight_decay=weight_decay, eps=args.opt_eps) + else: + assert False and "Invalid optimizer" + raise ValueError + + return optimizer + +def main(): + parser = argparse.ArgumentParser(description='Training') + # Dataset / Model parameters + parser.add_argument('--data', metavar='DIR', + help='path to dataset') + parser.add_argument('--model', default='hypernet', type=str, metavar='MODEL', + help='Name of model to train (default: "countception"') + parser.add_argument('--pretrained', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', + help='Initialize model from this checkpoint (default: none)') + parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='Resume full model and optimizer state from checkpoint (default: none)') + parser.add_argument('--num-classes', type=int, default=1000, metavar='N', + help='number of label classes (default: 1000)') + parser.add_argument('--gp', default='avg', type=str, metavar='POOL', + help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")') + parser.add_argument('--img-size', type=int, default=None, metavar='N', + help='Image patch size (default: None => model default)') + parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') + parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') + parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') + parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N', + help='input batch size for training (default: 32)') + parser.add_argument('--drop', type=float, default=0.0, metavar='DROP', + help='Dropout rate (default: 0.)') + # Optimizer parameters + parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') + parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 0.0001)') + # Learning rate schedule parameters + parser.add_argument('--sched', default='spos_linear', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', + help='warmup learning rate (default: 0.0001)') + parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', + help='lower lr bound for cyclic schedulers that 
hit 0 (1e-5)') + parser.add_argument('--epochs', type=int, default=120, metavar='N', + help='number of epochs to train (default: 2)') + parser.add_argument('--start-epoch', default=None, type=int, metavar='N', + help='manual epoch number (useful on restarts)') + parser.add_argument('--decay-epochs', type=int, default=15, metavar='N', + help='epoch interval to decay LR') + parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', + help='epochs to cooldown LR at min_lr, after cyclic schedule ends') + parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', + help='LR decay rate (default: 0.1)') + parser.add_argument('--grad', type=int, default=1, metavar='RATE', + help='LR decay rate (default: 0.1)') + # Augmentation parameters + parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument('--reprob', type=float, default=0., metavar='PCT', + help='Random erase prob (default: 0.)') + parser.add_argument('--remode', type=str, default='const', + help='Random erase mode (default: "const")') + parser.add_argument('--mixup', type=float, default=0.0, + help='mixup alpha, mixup enabled if > 0. (default: 0.)') + parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', + help='turn off mixup after this epoch, disabled if 0 (default: 0)') + parser.add_argument('--smoothing', type=float, default=0.1, + help='label smoothing (default: 0.1)') + # Batch norm parameters (only works with gen_efficientnet based models currently) + parser.add_argument('--bn-tf', action='store_true', default=False, + help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') + parser.add_argument('--bn-momentum', type=float, default=None, + help='BatchNorm momentum override (if not None)') + parser.add_argument('--bn-eps', type=float, default=None, + help='BatchNorm epsilon override (if not None)') + # Model Exponential Moving Average + parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') + parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') + parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', + help='learning rate noise on/off epoch percentages') + parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', + help='learning rate noise limit percent (default: 0.67)') + parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', + help='learning rate noise std-dev (default: 1.0)') + # Misc + parser.add_argument('--seed', type=int, default=42, metavar='S', + help='random seed (default: 42)') + parser.add_argument('--log-interval', type=int, default=50, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', + help='how many training processes to use (default: 1)') + parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUS to use') + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--update_iter", default=1, type=int) + parser.add_argument("--slice", default=4, type=int) + parser.add_argument("--pool_size", default=10, type=int) + parser.add_argument('--resunit', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--dil_conv', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--tiny', action='store_true', default=False) + parser.add_argument('--flops_maximum', default=600, type=int) + parser.add_argument('--flops_minimum', default=0, type=int) + parser.add_argument('--pick_method', default='meta', type=str) + parser.add_argument('--meta_lr', default=1e-2, type=float) + parser.add_argument('--meta_sta_epoch', default=-1, type=int) + parser.add_argument('--how_to_prob', default='pre_prob', type=str) + parser.add_argument('--pre_prob', default=(0.05, 0.2, 0.05, 0.5, 0.05, 0.15), type=tuple) + args = parser.parse_args() + + seed = args.seed + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + torch.backends.cudnn.deterministic = True + + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + if args.distributed and args.num_gpu > 1: + logger.warning( + 'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') + args.num_gpu = 1 + + args.device = 'cuda:0' + args.world_size = 1 + args.rank = 0 # global rank + if args.distributed: + args.num_gpu = 1 + args.device = 'cuda:%d' % args.local_rank + torch.cuda.set_device(args.local_rank) + import random + port = random.randint(0, 50000) + torch.distributed.init_process_group(backend='nccl', init_method='env://') # tcp://127.0.0.1:{}'.format(port), rank=args.local_rank, world_size=8) + args.world_size = torch.distributed.get_world_size() + args.rank = torch.distributed.get_rank() + assert args.rank >= 0 + + if args.distributed: + logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' + % (args.rank, args.world_size)) + else: + logging.info('Training with a single process on %d GPUs.' 
% args.num_gpu) + + model, sta_num, size_factor = _gen_supernet( + flops_minimum=args.flops_minimum, + flops_maximum=args.flops_maximum, + num_classes=args.num_classes, + drop_rate=args.drop, + global_pool=args.gp, + resunit=args.resunit, + dil_conv=args.dil_conv, + slice=args.slice) + + if args.local_rank == 0: + print("Model Searched Using FLOPs {}".format(size_factor * 32)) + + data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + if args.local_rank == 0: + logger.info(args) + + choice_num = 6 + if args.resunit: + choice_num += 1 + if args.dil_conv: + choice_num += 2 + + if args.local_rank == 0: + logger.info("Choice_num: {}".format(choice_num)) + + model_est = LatencyEst(model) + + if args.local_rank == 0: + logger.info('Model %s created, param count: %d' % + (args.model, sum([m.numel() for m in model.parameters()]))) + + # data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + + # optionally resume from a checkpoint + optimizer_state = None + resume_epoch = None + if args.resume: + optimizer_state, resume_epoch = resume_checkpoint(model, args.resume) + + if args.num_gpu > 1: + if args.amp: + logging.warning( + 'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') + args.amp = False + model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() + else: + model.cuda() + + optimizer = create_optimizer_supernet(args, model) + if optimizer_state is not None: + optimizer.load_state_dict(optimizer_state['optimizer']) + + if args.distributed: + if has_apex: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info("Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.") + model = DDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1 + # NOTE: EMA model does not need to be wrapped by DDP + + lr_scheduler, num_epochs = create_scheduler(args, optimizer) + + start_epoch = 0 + if args.start_epoch is not None: + # a specified start_epoch will always override the resume epoch + start_epoch = args.start_epoch + elif resume_epoch is not None: + start_epoch = resume_epoch + if start_epoch > 0: + lr_scheduler.step(start_epoch) + + if args.local_rank == 0: + logger.info('Scheduled epochs: {}'.format(num_epochs)) + + if args.tiny: + from dataset.tiny_imagenet import get_newimagenet + [loader_train, loader_eval], [train_sampler, test_sampler] = get_newimagenet(args.data, args.batch_size) + else: + train_dir = os.path.join(args.data, 'train') + if not os.path.exists(train_dir): + logger.error('Training folder does not exist at: {}'.format(train_dir)) + exit(1) + dataset_train = Dataset(train_dir) + + collate_fn = None + + loader_train = create_loader( + dataset_train, + input_size=data_config['input_size'], + batch_size=args.batch_size, + is_training=True, + re_prob=args.reprob, + re_mode=args.remode, + color_jitter=args.color_jitter, + interpolation='random', # FIXME cleanly resolve this? 
data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + collate_fn=collate_fn, + ) + + eval_dir = os.path.join(args.data, 'val') + if not os.path.isdir(eval_dir): + logger.error('Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + dataset_eval = Dataset(eval_dir) + + loader_eval = create_loader( + dataset_eval, + input_size=data_config['input_size'], + batch_size=4 * args.batch_size, + is_training=False, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + ) + + criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda() + val_loss = nn.CrossEntropyLoss().cuda() + + mutator = CreamSupernetTrainingMutator(model, args.how_to_prob, args.pre_prob, choice_num, sta_num) + + trainer = CreamSupernetTrainer(model, criterion, optimizer, args.epochs, + train_loader=loader_train, valid_loader=loader_eval, + mutator=mutator, batch_size=args.batch_size, + log_frequency=args.log_interval, est=model_est, meta_sta_epoch=args.meta_sta_epoch, + update_iter=args.update_iter, slices=args.slice, pool_size=args.pool_size, + pick_method=args.pick_method, lr_scheduler=lr_scheduler, distributed=args.distributed, + local_rank=args.local_rank, val_loss=val_loss) + trainer.train() + + +if __name__ == '__main__': + main() + diff --git a/examples/nas/cream/test.py b/examples/nas/cream/test.py new file mode 100755 index 0000000000..8ee79d5381 --- /dev/null +++ b/examples/nas/cream/test.py @@ -0,0 +1,482 @@ +import os +import argparse +import time +import numpy as np +import logging +import torch.nn as nn +from datetime import datetime +from copy import deepcopy + +try: + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + has_apex = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + has_apex = False + +from dataset import Dataset, create_loader, resolve_data_config +from models.model import _gen_childnet +import torch.distributed as dist +from utils.flops_table import LatencyEst +from utils.helpers import * +from utils.EMA import ModelEma +from utils.saver import CheckpointSaver +from utils.loss import LabelSmoothingCrossEntropy +from utils.scheduler import create_scheduler +from torch.utils.tensorboard import SummaryWriter + +from nni.nas.pytorch.cream import CreamSupernetTrainer +from nni.nas.pytorch.cream import CreamSupernetTrainingMutator + +logger = logging.getLogger("nni.cream.supernet") + +def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + meta_layer_no_decay = [] + meta_layer_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + if 'meta_layer' in name: + meta_layer_no_decay.append(param) + else: + no_decay.append(param) + else: + if 'meta_layer' in name: + meta_layer_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, + {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, + {'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr}, + {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr}, + ] + +def create_optimizer_supernet(args, model, filter_bias_and_bn=True): + from 
torch import optim as optim + opt_lower = args.opt.lower() + weight_decay = args.weight_decay + if 'adamw' in opt_lower or 'radam' in opt_lower: + # Compensate for the way current AdamW and RAdam optimizers apply LR to the weight-decay + # I don't believe they follow the paper or original Torch7 impl which schedules weight + # decay based on the ratio of current_lr/initial_lr + weight_decay /= args.lr + if weight_decay and filter_bias_and_bn: + parameters = add_weight_decay_supernet(model, args, weight_decay) + weight_decay = 0. + else: + parameters = model.parameters() + + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + optimizer = optim.SGD( + parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) + elif opt_lower == 'momentum': + optimizer = optim.SGD( + parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) + elif opt_lower == 'adam': + optimizer = optim.Adam( + parameters, weight_decay=weight_decay, eps=args.opt_eps) + else: + assert False and "Invalid optimizer" + raise ValueError + + return optimizer + +def main(): + parser = argparse.ArgumentParser(description='Training') + # Dataset / Model parameters + parser.add_argument('--data', metavar='DIR', + help='path to dataset') + parser.add_argument('--model', default='hypernet', type=str, metavar='MODEL', + help='Name of model to train (default: "countception"') + parser.add_argument('--pretrained', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', + help='Initialize model from this checkpoint (default: none)') + parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='Resume full model and optimizer state from checkpoint (default: none)') + parser.add_argument('--num-classes', type=int, default=1000, metavar='N', + help='number of label classes (default: 1000)') + parser.add_argument('--gp', default='avg', type=str, metavar='POOL', + help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")') + parser.add_argument('--img-size', type=int, default=None, metavar='N', + help='Image patch size (default: None => model default)') + parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') + parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') + parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') + parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N', + help='input batch size for training (default: 32)') + parser.add_argument('--drop', type=float, default=0.0, metavar='DROP', + help='Dropout rate (default: 0.)') + # Optimizer parameters + parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') + parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 
0.0001)') + # Learning rate schedule parameters + parser.add_argument('--sched', default='spos_linear', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', + help='warmup learning rate (default: 0.0001)') + parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + parser.add_argument('--epochs', type=int, default=120, metavar='N', + help='number of epochs to train (default: 2)') + parser.add_argument('--start-epoch', default=None, type=int, metavar='N', + help='manual epoch number (useful on restarts)') + parser.add_argument('--decay-epochs', type=int, default=15, metavar='N', + help='epoch interval to decay LR') + parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', + help='epochs to cooldown LR at min_lr, after cyclic schedule ends') + parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', + help='LR decay rate (default: 0.1)') + parser.add_argument('--grad', type=int, default=1, metavar='RATE', + help='LR decay rate (default: 0.1)') + # Augmentation parameters + parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument('--reprob', type=float, default=0., metavar='PCT', + help='Random erase prob (default: 0.)') + parser.add_argument('--remode', type=str, default='const', + help='Random erase mode (default: "const")') + parser.add_argument('--mixup', type=float, default=0.0, + help='mixup alpha, mixup enabled if > 0. (default: 0.)') + parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', + help='turn off mixup after this epoch, disabled if 0 (default: 0)') + parser.add_argument('--smoothing', type=float, default=0.1, + help='label smoothing (default: 0.1)') + # Batch norm parameters (only works with gen_efficientnet based models currently) + parser.add_argument('--bn-tf', action='store_true', default=False, + help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') + parser.add_argument('--bn-momentum', type=float, default=None, + help='BatchNorm momentum override (if not None)') + parser.add_argument('--bn-eps', type=float, default=None, + help='BatchNorm epsilon override (if not None)') + # Model Exponential Moving Average + parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') + parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') + parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', + help='learning rate noise on/off epoch percentages') + parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', + help='learning rate noise limit percent (default: 0.67)') + parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', + help='learning rate noise std-dev (default: 1.0)') + # Misc + parser.add_argument('--seed', type=int, default=42, metavar='S', + help='random seed (default: 42)') + parser.add_argument('--log-interval', type=int, default=50, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', + help='how many training processes to use (default: 1)') + parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUS to use') + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--update_iter", default=1, type=int) + parser.add_argument("--slice", default=4, type=int) + parser.add_argument("--pool_size", default=10, type=int) + parser.add_argument('--resunit', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--dil_conv', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') + parser.add_argument('--tiny', action='store_true', default=False) + parser.add_argument('--flops_maximum', default=600, type=int) + parser.add_argument('--flops_minimum', default=0, type=int) + parser.add_argument('--pick_method', default='meta', type=str) + parser.add_argument('--meta_lr', default=1e-2, type=float) + parser.add_argument('--meta_sta_epoch', default=-1, type=int) + parser.add_argument('--model_selection', default=14, type=int) + parser.add_argument('--how_to_prob', default='pre_prob', type=str) + parser.add_argument('--pre_prob', default=(0.05, 0.2, 0.05, 0.5, 0.05, 0.15), type=tuple) + args = parser.parse_args() + + seed = args.seed + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + torch.backends.cudnn.deterministic = True + + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + if args.distributed and args.num_gpu > 1: + logger.warning( + 'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') + args.num_gpu = 1 + + args.device = 'cuda:0' + args.world_size = 1 + args.rank = 0 # global rank + if args.distributed: + args.num_gpu = 1 + args.device = 'cuda:%d' % args.local_rank + torch.cuda.set_device(args.local_rank) + import random + port = random.randint(0, 50000) + torch.distributed.init_process_group(backend='nccl', init_method='env://') # tcp://127.0.0.1:{}'.format(port), rank=args.local_rank, world_size=8) + args.world_size = torch.distributed.get_world_size() + args.rank = torch.distributed.get_rank() + assert args.rank >= 0 + + if args.distributed: + logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' + % (args.rank, args.world_size)) + else: + logging.info('Training with a single process on %d GPUs.' 
% args.num_gpu) + + if args.model_selection == 470: + arch_list = [[0], [3, 4, 3, 1], [3, 2, 3, 0], [3, 3, 3, 1], [3, 3, 3, 3], [3, 3, 3, 3], [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', + 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 224 + elif args.model_selection == 42: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 96 + elif args.model_selection == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k3_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 64 + elif args.model_selection == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k3_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 160 + elif args.model_selection == 285: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + 
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 224 + elif args.model_selection == 600: + arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], + [0]] + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + args.img_size = 224 + + model = _gen_childnet( + arch_list, + arch_def, + num_classes=args.num_classes, + drop_rate=args.drop, + global_pool=args.gp) + + data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + if args.local_rank == 0: + logger.info(args) + + if args.local_rank == 0: + logger.info('Model %s created, param count: %d' % + (args.model, sum([m.numel() for m in model.parameters()]))) + + # data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + + if args.num_gpu > 1: + if args.amp: + logging.warning( + 'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') + args.amp = False + model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() + else: + model.cuda() + + if args.distributed: + if has_apex: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info("Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") + model = DDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1 + # NOTE: EMA model does not need to be wrapped by DDP + + model_ema = ModelEma( + model, + decay=args.model_ema_decay, + device='cpu' if args.model_ema_force_cpu else '', + resume=args.resume) + + if args.tiny: + from dataset.tiny_imagenet import get_newimagenet + [loader_train, loader_eval], [train_sampler, test_sampler] = get_newimagenet(args.data, args.batch_size) + else: + train_dir = os.path.join(args.data, 'train') + if not os.path.exists(train_dir): + logger.error('Training folder does not exist at: {}'.format(train_dir)) + exit(1) + + eval_dir = os.path.join(args.data, 'val') + if not os.path.isdir(eval_dir): + logger.error('Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + dataset_eval = Dataset(eval_dir) + + loader_eval = create_loader( + dataset_eval, + input_size=data_config['input_size'], + batch_size=4 * args.batch_size, + is_training=False, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + ) + + def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + return [correct[:k].view(-1).float().sum(0) * 100. / batch_size for k in topk] + + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + model_ema.ema.eval() + + with torch.no_grad(): + for step, (x, y) in enumerate(loader_eval): + logits = model_ema.ema(x) + prec1, prec5 = accuracy(logits, y, topk=(1, 5)) + + prec1 = reduce_tensor(prec1, args.world_size) + prec5 = reduce_tensor(prec5, args.world_size) + + prec1_m.update(prec1.item(), logits.size(0)) + prec5_m.update(prec5.item(), logits.size(0)) + + if args.local_rank == 0: + logger.info("Prec1: %s Prec5: %s", prec1_m.avg, prec5_m.avg) + +if __name__ == '__main__': + main() + diff --git a/examples/nas/cream/test.sh b/examples/nas/cream/test.sh new file mode 100755 index 0000000000..a257ba5ea4 --- /dev/null +++ b/examples/nas/cream/test.sh @@ -0,0 +1,2 @@ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_test.sh 8 \ +--data ~/data_local/imagenet --model_selection 285 --resume ~/data_local/nips_ckp/285m/model_best.pth.tar # 0.06 --drop 0. -j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 diff --git a/examples/nas/cream/utils/EMA.py b/examples/nas/cream/utils/EMA.py new file mode 100755 index 0000000000..e3c47a1fea --- /dev/null +++ b/examples/nas/cream/utils/EMA.py @@ -0,0 +1,66 @@ +import torch +import logging + +from copy import deepcopy +from collections import OrderedDict + +class ModelEma: + """ Model Exponential Moving Average + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. 
Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU. + """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + logging.info("Loaded state_dict_ema") + else: + logging.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. 
- self.decay) * model_v) \ No newline at end of file diff --git a/examples/nas/cream/utils/__init__.py b/examples/nas/cream/utils/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/nas/cream/utils/flops_table.py b/examples/nas/cream/utils/flops_table.py new file mode 100755 index 0000000000..b46476290b --- /dev/null +++ b/examples/nas/cream/utils/flops_table.py @@ -0,0 +1,77 @@ +import torch +from ptflops import get_model_complexity_info + +class LatencyEst(object): + def __init__(self, model, input_shape=(1, 3, 224, 224), device='cpu'): + self.block_num = len(model.blocks) + self.choice_num = len(model.blocks[0]) + self.latency_dict = {} + self.flops_dict = {} + self.params_dict = {} + + if device == 'cpu': + model = model.cpu() + else: + model = model.cuda() + + self.params_fixed = 0 + self.flops_fixed = 0 + + input = torch.randn((2, 3, 224, 224)) + + flops, params = get_model_complexity_info(model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + input = model.conv_stem(input) + + # for block_id, block in enumerate(model.blocks): + # self.flops_dict[block_id] = {} + # self.params_dict[block_id] = {} + for module_id, module in enumerate(model.blocks): + self.flops_dict[module_id] = {} + self.params_dict[module_id] = {} + for choice_id, choice in enumerate(module): + flops, params = get_model_complexity_info(choice, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) + self.flops_dict[module_id][choice_id] = flops / 1e6 # M + self.params_dict[module_id][choice_id] = params /1e6 # M + + input = choice(input) + + # conv_last + flops, params = get_model_complexity_info(model.global_pool, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + input = model.global_pool(input) + + # globalpool + flops, params = get_model_complexity_info(model.conv_head, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) + self.params_fixed += params / 1e6 + self.flops_fixed += flops / 1e6 + + # return params (M) + def get_params(self, arch): + params = 0 + for block_id, block in enumerate(arch.keys()): + if block is 'LayerChoice1' or block is 'LayerChoice23': + continue + for idx, choice in enumerate(arch[block]): + params += self.params_dict[block_id][idx] * (choice is True) + return params + self.params_fixed + + # return flops (M) + def get_flops(self, arch): + flops = 0 + for block_id, block in enumerate(arch.keys()): + if block is 'LayerChoice1' or block_id is 'LayerChoice23': + continue + for idx, choice in enumerate(arch[block]): + flops += self.flops_dict[block_id][idx] * (1 if choice else 0) + return flops + self.flops_fixed + +if __name__ == '__main__': + from models.hypernet import _gen_supernet + model = _gen_supernet() + est = LatencyEst(model) + print(est.get_flops([[0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0]])) diff --git a/examples/nas/cream/utils/helpers.py b/examples/nas/cream/utils/helpers.py new file mode 100755 index 0000000000..dc42f65318 --- /dev/null +++ b/examples/nas/cream/utils/helpers.py @@ -0,0 +1,169 @@ +import os +import csv +import torch +import logging +import logging.handlers + +from collections import OrderedDict +from torch import distributed as dist + +from utils.saver import unwrap_model + +def get_logger(file_path, time=True): + """ Make python logger """ + logger = 
logging.getLogger("train") + if time: + log_format = '%(asctime)s | %(message)s' + else: + log_format = '%(message)s' + formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') + file_handler = logging.FileHandler(file_path) + file_handler.setFormatter(formatter) + stream_handler = logging.StreamHandler() + stream_handler.setFormatter(formatter) + + logger.addHandler(file_handler) + logger.addHandler(stream_handler) + logger.setLevel(logging.INFO) + logger.propagate = False + + return logger + +def load_state_dict(checkpoint_path, use_ema=False): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = 'state_dict' + if isinstance(checkpoint, dict): + if use_ema and 'state_dict_ema' in checkpoint: + state_dict_key = 'state_dict_ema' + if state_dict_key and state_dict_key in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint[state_dict_key].items(): + # strip `module.` prefix + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + state_dict = new_state_dict + else: + state_dict = checkpoint + logging.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + logging.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + +def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): + state_dict = load_state_dict(checkpoint_path, use_ema) + model.load_state_dict(state_dict, strict=strict) + +def resume_checkpoint(model, checkpoint_path): + other_state = {} + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + if 'optimizer' in checkpoint: + other_state['optimizer'] = checkpoint['optimizer'] + if 'amp' in checkpoint: + other_state['amp'] = checkpoint['amp'] + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + logging.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + logging.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return other_state, resume_epoch + else: + logging.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + return [correct[:k].view(-1).float().sum(0) * 100. 
/ batch_size for k in topk] + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + +def distribute_bn(model, world_size, reduce=False): + # ensure every node has the same running bn stats + for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' in bn_name): + if reduce: + # average bn stats across whole group + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= float(world_size) + else: + # broadcast bn stats from rank 0 to whole group + torch.distributed.broadcast(bn_buf, 0) + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) \ No newline at end of file diff --git a/examples/nas/cream/utils/loss.py b/examples/nas/cream/utils/loss.py new file mode 100755 index 0000000000..cc4586ebe4 --- /dev/null +++ b/examples/nas/cream/utils/loss.py @@ -0,0 +1,31 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def cross_entropy_loss_with_soft_target(pred, soft_target): + logsoftmax = nn.LogSoftmax() + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + + +class LabelSmoothingCrossEntropy(nn.Module): + """ + NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + """ + Constructor for the LabelSmoothing module. + :param smoothing: label smoothing factor + """ + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. 
- smoothing + + def forward(self, x, target): + logprobs = F.log_softmax(x, dim=-1) + nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) + nll_loss = nll_loss.squeeze(1) + smooth_loss = -logprobs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + return loss.mean() \ No newline at end of file diff --git a/examples/nas/cream/utils/optimizer.py b/examples/nas/cream/utils/optimizer.py new file mode 100755 index 0000000000..ed49f48d1b --- /dev/null +++ b/examples/nas/cream/utils/optimizer.py @@ -0,0 +1,162 @@ +import torch +from torch import optim as optim +from torch.optim import Optimizer + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt + to closer match Tensorflow for matching hyper-params. + Proposed by G. Hinton in his + `course `_. + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing (decay) constant (default: 0.9) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer + update as per defaults in Tensorflow + """ + + def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, + decoupled_decay=False, lr_in_momentum=True): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, + decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
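
The "TF-style" epsilon described above is easy to miss in the update formula, so here is a tiny standalone sketch of the difference; the tensor values are invented purely for illustration and the snippet does not touch the optimizer class itself.

```python
import torch

# RMSpropTF adds eps to the running square average *before* the square root,
# which matters most when the squared-gradient average is tiny.
square_avg = torch.tensor([1e-12, 1e-6, 1.0])
eps = 1e-10
tf_style = (square_avg + eps).sqrt()   # denominator style used by RMSpropTF above
pt_style = square_avg.sqrt() + eps     # denominator style used by torch.optim.RMSprop
print(tf_style)
print(pt_style)
```
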
+ """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p.data) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p.data) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if 'decoupled_decay' in group and group['decoupled_decay']: + p.data.add_(-group['weight_decay'], p.data) + else: + grad = grad.add(group['weight_decay'], p.data) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg) + # square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(one_minus_alpha, grad - grad_avg) + # grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original + avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if 'lr_in_momentum' in group and group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg) + p.data.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.data.add_(-group['lr'], buf) + else: + p.data.addcdiv_(-group['lr'], grad, avg) + + return loss + + +def add_weight_decay(model, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + no_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def create_optimizer(args, model, filter_bias_and_bn=True): + opt_lower = args.opt.lower() + weight_decay = args.weight_decay + if weight_decay and filter_bias_and_bn: + parameters = add_weight_decay(model, weight_decay) + weight_decay = 0. 
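
As a quick check of the grouping that `add_weight_decay` (and its supernet variant) performs, the sketch below applies the same rule to a throwaway model: any 1-D parameter or bias is excluded from weight decay. The toy model and the printed names are illustrative only.

```python
import torch.nn as nn

# Toy module; we only inspect its parameters and never run a forward pass.
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Linear(8, 4))

decay, no_decay = [], []
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue
    if len(param.shape) == 1 or name.endswith(".bias"):
        no_decay.append(name)   # biases and BatchNorm affine parameters
    else:
        decay.append(name)      # conv / linear weight matrices
print("decay:", decay)
print("no decay:", no_decay)
```
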
+ else: + parameters = model.parameters() + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + optimizer = optim.SGD( + parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) + elif opt_lower == 'momentum': + optimizer = optim.SGD( + parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) + elif opt_lower == 'adam': + optimizer = optim.Adam( + parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF( + parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps, + momentum=args.momentum, weight_decay=weight_decay) + else: + assert False and "Invalid optimizer" + raise ValueError + + return optimizer \ No newline at end of file diff --git a/examples/nas/cream/utils/saver.py b/examples/nas/cream/utils/saver.py new file mode 100755 index 0000000000..2ad3d4659b --- /dev/null +++ b/examples/nas/cream/utils/saver.py @@ -0,0 +1,140 @@ +import torch +import os +import glob +import operator +import logging +import shutil + +from utils.EMA import ModelEma + +def unwrap_model(model): + if isinstance(model, ModelEma): + return unwrap_model(model.ema) + else: + return model.module if hasattr(model, 'module') else model + +def get_state_dict(model): + return unwrap_model(model).state_dict() + +class CheckpointSaver: + def __init__( + self, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10): + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + assert self.max_history >= 1 + + def save_checkpoint(self, model, optimizer, args, epoch, model_ema=None, metric=None, use_amp=False): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) + if os.path.exists(last_save_path): + os.remove(last_save_path) + #os.unlink(last_save_path)# required for Windows support. 
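
The bookkeeping in `save_checkpoint` can be summarized without any model or filesystem access: checkpoints are kept as a best-first list of (path, metric) tuples, and a new checkpoint is admitted when the history is not full or it beats the current worst entry. The file names, metric values, and `max_history` below are made up for illustration.

```python
import operator

decreasing = False                              # e.g. top-1 accuracy: higher is better
cmp = operator.lt if decreasing else operator.gt
max_history = 3
history = [('checkpoint-3.pth.tar', 71.2), ('checkpoint-1.pth.tar', 69.8)]

candidate = ('checkpoint-7.pth.tar', 70.5)
worst = history[-1] if history else None
if len(history) < max_history or cmp(candidate[1], worst[1]):
    history.append(candidate)
    history.sort(key=lambda x: x[1], reverse=not decreasing)
    history = history[:max_history]
print(history)
```
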
+ os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + self._save(save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) + # os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + logging.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + + # if os.path.exists(best_save_path): + # os.unlink(best_save_path) + # os.link(last_save_path, best_save_path) + if os.path.exists(best_save_path): + os.remove(best_save_path) + self._save(best_save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) + + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, model, optimizer, args, epoch, model_ema=None, metric=None, use_amp=False): + save_state = { + 'epoch': epoch, + 'arch': args.model, + 'state_dict': get_state_dict(model), + 'optimizer': optimizer.state_dict(), + 'args': args, + 'version': 2, # version < 2 increments epoch before save + } + if model_ema is not None: + save_state['state_dict_ema'] = get_state_dict(model_ema) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index <= 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + logging.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + logging.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, model, optimizer, args, epoch, model_ema=None, use_amp=False, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, model, optimizer, args, epoch, model_ema, use_amp=use_amp) + if os.path.exists(self.last_recovery_file): + try: + logging.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + logging.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + if len(files): + return files[0] + else: + return '' \ No newline at end of file diff --git 
a/examples/nas/cream/utils/scheduler.py b/examples/nas/cream/utils/scheduler.py new file mode 100755 index 0000000000..8826bd8b22 --- /dev/null +++ b/examples/nas/cream/utils/scheduler.py @@ -0,0 +1,309 @@ +import logging +import math +import numpy as np +import torch + +from typing import Dict, Any + +class Scheduler: + """ Parameter Scheduler Base Class + A scheduler base class that can be used to schedule any optimizer parameter groups. + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + The schedulers built on this should try to remain as stateless as possible (for simplicity). + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
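
Stripped of the scheduling logic, the base class above ultimately writes one value per optimizer parameter group into the field it was constructed with (usually `lr`). A self-contained illustration of that update, with arbitrary values standing in for the scheduled ones:

```python
import torch

params = [torch.nn.Parameter(torch.zeros(1)) for _ in range(2)]
optimizer = torch.optim.SGD(
    [{'params': [params[0]], 'lr': 0.1},
     {'params': [params[1]], 'lr': 0.01}],
    lr=0.1)

# What Scheduler.update_groups amounts to: one new value per param group.
new_values = [0.05, 0.005]
for group, value in zip(optimizer.param_groups, new_values):
    group['lr'] = value
print([group['lr'] for group in optimizer.param_groups])
```
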
+ self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs + +logger = logging.getLogger(__name__) + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. 
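
For reference, the per-step value this scheduler produces inside a single cycle (ignoring warmup, restarts, and noise) follows the usual SGDR cosine curve. The constants below are arbitrary example settings, not defaults from this repository.

```python
import math

lr_max, lr_min, t_initial = 0.5, 1e-5, 100   # example settings
lrs = [lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t / t_initial))
       for t in range(t_initial)]
print(lrs[0], lrs[50], lrs[-1])   # starts at lr_max, reaches the midpoint mid-cycle, approaches lr_min
```
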
+ Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + t_mul: float = 1., + lr_min: float = 0., + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + cycle_limit=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and t_mul == 1 and decay_rate == 1: + logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.t_mul = t_mul + self.lr_min = lr_min + self.decay_rate = decay_rate + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.t_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul)) + t_i = self.t_mul ** i * self.t_initial + t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.decay_rate ** i + lr_min = self.lr_min * gamma + lr_max_values = [v * gamma for v in self.base_values] + + if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit): + lrs = [ + lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + if not cycles: + cycles = self.cycle_limit + assert cycles > 0 + if self.t_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul))) + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + 
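
Both schedulers share the same warmup handling: `warmup_steps` holds a per-group increment and, for `t < warmup_t`, the value ramps linearly from `warmup_lr_init` toward the base value. A numeric sketch with illustrative settings:

```python
warmup_lr_init, warmup_t, base_lr = 1e-4, 3, 0.01   # illustrative settings

step = (base_lr - warmup_lr_init) / warmup_t        # mirrors the warmup_steps computation above
for t in range(warmup_t + 1):
    print(t, warmup_lr_init + t * step)             # hits base_lr exactly at t == warmup_t
```
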
super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if args.lr_noise is not None: + if isinstance(args.lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in args.lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = args.lr_noise * num_epochs + else: + noise_range = None + + lr_scheduler = None + #FIXME expose cycle parms of the scheduler config to arguments + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + t_mul=1.0, + lr_min=args.min_lr, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cycle_limit=1, + t_in_epochs=True, + noise_range_t=noise_range, + noise_pct=args.lr_noise_pct, + noise_std=args.lr_noise_std, + noise_seed=args.seed, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + noise_range_t=noise_range, + noise_pct=args.lr_noise_pct, + noise_std=args.lr_noise_std, + noise_seed=args.seed, + ) + elif args.sched == 'spos_linear': + ITERS = args.epochs * 1251 + lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, + lambda step: ( + 1.0 - step / ITERS) if step <= ITERS else 0, + last_epoch=-1) + + return lr_scheduler, num_epochs \ No newline at end of file diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py new file mode 100755 index 0000000000..d7a61ddad9 --- /dev/null +++ b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +# from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator +from .trainer import CreamSupernetTrainer +from .mutator import CreamSupernetTrainingMutator \ No newline at end of file diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py new file mode 100755 index 0000000000..f87ba40307 --- /dev/null +++ b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging + +import numpy as np + +from nni.nas.pytorch.mutables import LayerChoice, InputChoice +from nni.nas.pytorch.random import RandomMutator + +_logger = logging.getLogger(__name__) + + +class CreamSupernetTrainingMutator(RandomMutator): + """ + A random mutator with flops limit. + + Parameters + ---------- + model : nn.Module + PyTorch model. + flops_func : callable + Callable that takes a candidate from `sample_search` and returns its candidate. When `flops_func` + is None, functions related to flops will be deactivated. + flops_lb : number + Lower bound of flops. + flops_ub : number + Upper bound of flops. 
+ flops_bin_num : number + Number of bins divided for the interval of flops to ensure the uniformity. Bigger number will be more + uniform, but the sampling will be slower. + flops_sample_timeout : int + Maximum number of attempts to sample before giving up and use a random candidate. + """ + def __init__(self, model, how_to_prob='even', pre_prob=(0.05,0.05,0.2,0.4,0.2,0.1), CHOICE_NUM=6, sta_num=(4,4,4,4,4)): + + super().__init__(model) + self.how_to_prob = how_to_prob + self.pre_prob = pre_prob + self.CHOICE_NUM = CHOICE_NUM + self.sta_num = sta_num + + def get_prob(self): + if self.how_to_prob == 'even': + return None + elif self.how_to_prob == 'pre_prob': + return self.pre_prob + else: + raise ValueError("prob method not supported") + + def sample_search(self): + """ + Sample a candidate for training. When `flops_func` is not None, candidates will be sampled uniformly + relative to flops. + + Returns + ------- + dict + """ + + return super(CreamSupernetTrainingMutator, self).sample_search() + + def sample_final(self): + """ + Implement only to suffice the interface of Mutator. + """ + return self.sample_search() diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py new file mode 100755 index 0000000000..a915583fb5 --- /dev/null +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -0,0 +1,312 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.distributed as dist +from copy import deepcopy +from nni.nas.pytorch.trainer import Trainer +from nni.nas.pytorch.utils import AverageMeterGroup + +from .mutator import CreamSupernetTrainingMutator + +logger = logging.getLogger(__name__) + +class CreamSupernetTrainer(Trainer): + """ + This trainer trains a supernet that can be used for evolution search. + + Parameters + ---------- + model : nn.Module + Model with mutables. + mutator : Mutator + A mutator object that has been initialized with the model. + loss : callable + Called with logits and targets. Returns a loss tensor. + metrics : callable + Returns a dict that maps metrics keys to metrics data. + optimizer : Optimizer + Optimizer that optimizes the model. + num_epochs : int + Number of epochs of training. + train_loader : iterable + Data loader of training. Raise ``StopIteration`` when one epoch is exhausted. + dataset_valid : iterable + Data loader of validation. Raise ``StopIteration`` when one epoch is exhausted. + batch_size : int + Batch size. + workers: int + Number of threads for data preprocessing. Not used for this trainer. Maybe removed in future. + device : torch.device + Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, trainer will + automatic detects GPU and selects GPU first. + log_frequency : int + Number of mini-batches to log metrics. + callbacks : list of Callback + Callbacks to plug into the trainer. See Callbacks. 
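
The `pre_prob` option documented for the mutator expresses a fixed prior over the candidate operators of each mutable layer, which the trainer then consumes through sampled architectures. The sketch below only illustrates what drawing operator indices under such a prior looks like; the `sample_search` shipped here currently defers to the uniform `RandomMutator` sampling, and the probabilities shown are the mutator's example defaults.

```python
import numpy as np

# One probability per candidate operator (CHOICE_NUM == 6); the values sum to 1.
pre_prob = (0.05, 0.05, 0.2, 0.4, 0.2, 0.1)
rng = np.random.default_rng(0)

# Draw one operator index per mutable layer under the prior.
num_layers = 10
picks = rng.choice(len(pre_prob), size=num_layers, p=pre_prob)
print(picks)
```
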
+ """ + + def __init__(self, model, loss, + optimizer, num_epochs, train_loader, valid_loader, + mutator=None, batch_size=64, log_frequency=None, + est=None, meta_sta_epoch=20, update_iter=200, slices=2, pool_size=10, + pick_method='meta', lr_scheduler=None, distributed=True, local_rank=0, val_loss=None): + assert torch.cuda.is_available() + super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None, optimizer, num_epochs, + train_loader, valid_loader, batch_size, 8, + 'cuda', log_frequency, None) + self.train_loader = train_loader + self.valid_loader = valid_loader + self.log_frequency = log_frequency + self.batch_size = batch_size + self.mutator = mutator + self.optimizer = optimizer + self.model = model + self.loss = loss + self.est = est + self.best_children_pool = [] + self.num_epochs = num_epochs + self.meta_sta_epoch = meta_sta_epoch + self.update_iter = update_iter + self.slices = slices + self.pick_method = pick_method + self.pool_size = pool_size + self.main_proc = not distributed or local_rank == 0 + self.distributed = distributed + self.val_loss = val_loss + self.lr_scheduler = lr_scheduler + self.callbacks = [] + self.arch_dict = dict() + + def cross_entropy_loss_with_soft_target(self, pred, soft_target): + logsoftmax = nn.LogSoftmax() + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + + def reduce_tensor(self, tensor): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= float(os.environ["WORLD_SIZE"]) + return rt + + def reduce_metrics(self, metrics, distributed=False): + if distributed: + return {k: self.reduce_tensor(v).item() for k, v in metrics.items()} + return {k: v.item() for k, v in metrics.items()} + + def accuracy(self, output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + return [correct[:k].view(-1).float().sum(0) * 100. 
/ batch_size for k in topk] + + def train_one_epoch(self, epoch): + def get_model(model): + try: + return model.module + except: + return model + + meters = AverageMeterGroup() + for step, (input, target) in enumerate(self.train_loader): + self.optimizer.zero_grad() + self.mutator.reset() + + input = input.cuda() + target = target.cuda() + + cand_flops = self.est.get_flops(self.mutator._cache) + + if epoch > self.meta_sta_epoch and step > 0 and step % self.update_iter == 0: + + slice = self.slices + x = deepcopy(input[:slice].clone().detach()) + + if len(self.best_children_pool) > 0: + if self.pick_method == 'top1': + meta_value, cand = 1, sorted(self.best_children_pool, reverse=True)[0][3] + elif self.pick_method == 'meta': + meta_value, cand_idx, cand = -1000000000, -1, None + for now_idx, item in enumerate(self.best_children_pool): + inputx = item[3] + output = F.softmax(self.model(inputx), dim=1) + weight = get_model(self.model).forward_meta(output - item[4]) + if weight > meta_value: + meta_value = weight # deepcopy(torch.nn.functional.sigmoid(weight)) + cand_idx = now_idx + cand = self.arch_dict[(self.best_children_pool[cand_idx][0], + self.best_children_pool[cand_idx][2])] + assert cand is not None + meta_value = torch.nn.functional.sigmoid(-weight) + else: + raise ValueError('Method Not supported') + + u_output = self.model(x) + + saved_cache = self.mutator._cache + self.mutator._cache = cand + u_teacher_output = self.model(x) + self.mutator._cache = saved_cache + + u_soft_label = F.softmax(u_teacher_output, dim=1) + kd_loss = meta_value * self.cross_entropy_loss_with_soft_target(u_output, u_soft_label) + self.optimizer.zero_grad() + + grad_1 = torch.autograd.grad(kd_loss, + get_model(self.model).rand_parameters(self.mutator._cache), + create_graph=True) + + def raw_sgd(w, g): + return g * self.optimizer.param_groups[-1]['lr'] + w + + students_weight = [raw_sgd(p, grad_item) + for p, grad_item in + zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1)] + + # update student weights + for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): + weight.grad = grad_item + torch.nn.utils.clip_grad_norm_(get_model(self.model).rand_parameters(self.mutator._cache), 1) + self.optimizer.step() + for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): + del weight.grad + + held_out_x = input[slice:slice * 2].clone() + output_2 = self.model(held_out_x) + valid_loss = self.loss(output_2, target[slice:slice * 2]) + self.optimizer.zero_grad() + + grad_student_val = torch.autograd.grad(valid_loss, + get_model(self.model).rand_parameters(self.mutator._cache), + retain_graph=True) + + grad_teacher = torch.autograd.grad(students_weight[0], + get_model(self.model).rand_parameters(cand, + self.pick_method == 'meta'), + grad_outputs=grad_student_val) + + # update teacher model + for weight, grad_item in zip(get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), + grad_teacher): + weight.grad = grad_item + torch.nn.utils.clip_grad_norm_( + get_model(self.model).rand_parameters(self.mutator._cache, self.pick_method == 'meta'), 1) + self.optimizer.step() + for weight, grad_item in zip(get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), + grad_teacher): + del weight.grad + + for item in students_weight: + del item + del grad_teacher, grad_1, grad_student_val, x, held_out_x + del valid_loss, kd_loss, u_soft_label, u_output, u_teacher_output, output_2 + + else: + raise 
ValueError("Must 1nd or 2nd update teacher weights") + + # get_best_teacher + if len(self.best_children_pool) > 0: + if self.pick_method == 'top1': + meta_value, cand = 0.5, sorted(self.best_children_pool, reverse=True)[0][3] + elif self.pick_method == 'meta': + meta_value, cand_idx, cand = -1000000000, -1, None + for now_idx, item in enumerate(self.best_children_pool): + inputx = item[3] + output = F.softmax(self.model(inputx), dim=1) + weight = get_model(self.model).forward_meta(output - item[4]) + if weight > meta_value: + meta_value = weight # deepcopy(torch.nn.functional.sigmoid(weight)) + cand_idx = now_idx + cand = self.arch_dict[(self.best_children_pool[cand_idx][0], + self.best_children_pool[cand_idx][2])] + assert cand is not None + meta_value = torch.nn.functional.sigmoid(-weight) + else: + raise ValueError('Method Not supported') + + if len(self.best_children_pool) == 0: + output = self.model(input) + loss = self.loss(output, target) + kd_loss = loss + elif epoch <= self.meta_sta_epoch: + output = self.model(input) + loss = self.loss(output, target) + else: + output = self.model(input) + with torch.no_grad(): + # save student arch + saved_cache = self.mutator._cache + self.mutator._cache = cand + + # forward + teacher_output = self.model(input).detach() + + # restore student arch + self.mutator._cache = saved_cache + soft_label = F.softmax(teacher_output, dim=1) + kd_loss = self.cross_entropy_loss_with_soft_target(output, soft_label) + valid_loss = self.loss(output, target) + loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2 + + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + prec1, prec5 = self.accuracy(output, target, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} + metrics = self.reduce_metrics(metrics, self.distributed) + meters.update(metrics) + + # best_children_pool = sorted(best_children_pool, reverse=True) + if epoch > self.meta_sta_epoch and ( + (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1][1] + 5) or ( + prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): + val_prec1 = prec1 + training_data = deepcopy(input[:self.slices].detach()) + if len(self.best_children_pool) == 0: + features = deepcopy(output[:self.slices].detach()) + else: + features = deepcopy(teacher_output[:self.slices].detach()) + self.best_children_pool.append( + (val_prec1, prec1, cand_flops, training_data, F.softmax(features, dim=1))) + self.arch_dict[(val_prec1, cand_flops)] = self.mutator._cache + self.best_children_pool = sorted(self.best_children_pool, reverse=True) + + if len(self.best_children_pool) > self.pool_size: + self.best_children_pool = sorted(self.best_children_pool, reverse=True) + del self.best_children_pool[-1] + + if self.lr_scheduler is not None: + self.lr_scheduler.step() + + if self.main_proc and self.log_frequency is not None and step % self.log_frequency == 0: + logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1, + self.num_epochs, step + 1, len(self.train_loader), meters) + + if self.main_proc: + for idx, i in enumerate(self.best_children_pool): + logger.info("No.{} {}".format(idx, i[:4])) + + def validate_one_epoch(self, epoch): + self.model.eval() + meters = AverageMeterGroup() + with torch.no_grad(): + for step, (x, y) in enumerate(self.valid_loader): + self.mutator.reset() + logits = self.model(x) + loss = self.val_loss(logits, y) + prec1, prec5 = self.accuracy(logits, y, topk=(1, 5)) + metrics = {"prec1": prec1, "prec5": 
prec5, "loss": loss} + metrics = self.reduce_metrics(metrics, self.distributed) + meters.update(metrics) + + if self.log_frequency is not None and step % self.log_frequency == 0: + logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1, + self.num_epochs, step + 1, len(self.valid_loader), meters) From 57a2c40561578d361fbdbd030a1dd25842552e9c Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 20 Jul 2020 12:35:41 +0800 Subject: [PATCH 02/62] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 912d412314..5e1173f7ba 100644 --- a/README.md +++ b/README.md @@ -135,6 +135,7 @@ Within the following table, we summarized the current NNI capabilities, we are g
  • ProxylessNAS
  • Network Morphism
  • TextNAS
+  • Cream
  • Model Compression From c15832f7a7ae78f436f6b7b09658678b6a7fe34a Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 14:37:01 +0800 Subject: [PATCH 03/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 2f2d4573ce..016226a695 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -10,6 +10,15 @@ convergence ratio and performance of the hypernetwork, as well as boosting the t settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. ## Reproduction Results +Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. + +| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | +| ---- |:-------------:| :-----:| +| 42M | 65.8 | 66.5 | +| 112M | 72.8 | 72.1 | +| 470M | 78.9 | 79.2 | +| 600M | 79.4 | 80.0 | + ## Examples From 806937c3c009a1289b35aa5cf38a965e1e8edc0d Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 15:31:37 +0800 Subject: [PATCH 04/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 016226a695..ea9ef4b2b4 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -15,7 +15,7 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | | ---- |:-------------:| :-----:| | 42M | 65.8 | 66.5 | -| 112M | 72.8 | 72.1 | +| 112M | 72.1 | 72.8 | | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | From b13fed0839400de66b845fb8accaef72ff4668c2 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 16:15:01 +0800 Subject: [PATCH 05/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index ea9ef4b2b4..3210cdaa80 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,6 +8,7 @@ paths is able to boost the training of subnetworks. Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. +For more details, pls refer to the [Paper] (https://github.com/microsoft/nni). ## Reproduction Results Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. 
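The distillation objective that PATCH 01 wires into `train_one_epoch` is easy to lose in the diff above: the sampled path is supervised both by the hard labels and by a soft-target cross-entropy against the prioritized-path teacher, and the two terms are blended by `meta_value` (a fixed constant in `top1` mode, a sigmoid-squashed matching score in `meta` mode). A minimal stand-alone sketch of that loss in plain PyTorch, outside the NNI trainer; the tensor shapes and the fixed `meta_value` are illustrative assumptions only:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def cross_entropy_with_soft_target(pred, soft_target):
    # Soft-target cross entropy: batch mean of -sum(p_teacher * log_softmax(student)).
    return torch.mean(torch.sum(-soft_target * F.log_softmax(pred, dim=1), dim=1))

# Toy batch: 4 samples, 10 classes (shapes chosen only for illustration).
student_logits = torch.randn(4, 10, requires_grad=True)
teacher_logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))

soft_label = F.softmax(teacher_logits, dim=1)
kd_loss = cross_entropy_with_soft_target(student_logits, soft_label)
valid_loss = nn.CrossEntropyLoss()(student_logits, target)

# In the trainer, meta_value comes from the teacher-matching step;
# a fixed constant is used here so the snippet runs on its own.
meta_value = 0.5
loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2
loss.backward()
```

Because the two weights sum to 2 before the final division, the combined loss is a convex combination of the distillation and hard-label terms and stays on the same scale as a plain cross-entropy step, however strongly the teacher is trusted.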
From d7c3217713b6f9dfd1da5b153dc4c7ea80fa092a Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 16:18:36 +0800 Subject: [PATCH 06/62] Update requirements.txt --- examples/nas/cream/requirements.txt | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/nas/cream/requirements.txt b/examples/nas/cream/requirements.txt index 8e5a947d5b..816923758a 100755 --- a/examples/nas/cream/requirements.txt +++ b/examples/nas/cream/requirements.txt @@ -3,10 +3,9 @@ numpy==1.17 opencv-python==4.0.1.24 torchvision==0.2.1 thop -git+https://github.com/sovrasov/flops-counter.pytorch.git +ptflops pillow==6.1.0 torch==1.2 timm==0.1.20 -tensorboardx==1.2 -git+https://github.com/Tramac/torchscope.git -tensorboard \ No newline at end of file +torchscope +tensorboard From 1951db0c338611e20726cad4aa89d0beaf1aaf17 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:05:56 +0800 Subject: [PATCH 07/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 3210cdaa80..b62e54ef1a 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -20,7 +20,6 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | - ## Examples [Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) @@ -29,15 +28,15 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t * python >= 3.6 * torch >= 1.2 * torchscope -* apex (not necessary, please make sure your nvcc CUDA version is the same with pytorch CUDA verision) +* apex ## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: -Put the imagenet data in ${Root}/data. It should be like following: +Put the imagenet data in ./examples/nas/cream/data. It should be like following: ```buildoutcfg -${Root}/data/imagenet/train -${Root}/data/imagenet/val +./examples/nas/cream/data/imagenet/train +./examples/nas/cream/data/imagenet/val ... ``` @@ -51,7 +50,7 @@ First, build environments for searching. pip install -r ./examples/nas/cream/requirements.txt ``` -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/supernet.sh` +To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. 
You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` ```buildoutcfg --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture @@ -59,7 +58,7 @@ To search for an architecture, you need to configure the parameters `flops_minim After you specify the flops of the architectures you would like to search, you can search an architecture now by running: ```buildoutcfg -sh ./experiments/scripts/supernet.sh +sh ./examples/nas/cream/run.sh ``` @@ -73,15 +72,15 @@ To test our trained of models, you need to use `model_selection` in `./examples/ After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. ```buildoutcfg ---resume './experiments/ckps/42.pth.tar' ---resume './experiments/ckps/470.pth.tar' +--resume './examples/nas/cream/experiments/ckps/42.pth.tar' +--resume './examples/nas/cream/experiments/ckps/470.pth.tar' ...... ``` We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './experiments/scripts/test.sh', you need to use the following command to test the model. +After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. ```buildoutcfg -sh ./experiments/scripts/test.sh +sh ./examples/nas/cream/test.sh ``` The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. From bce9cf21f0b6f9e6afe1ff07d68b344711262821 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:10:55 +0800 Subject: [PATCH 08/62] Update requirements.txt --- examples/nas/cream/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/nas/cream/requirements.txt b/examples/nas/cream/requirements.txt index 816923758a..03298d97c5 100755 --- a/examples/nas/cream/requirements.txt +++ b/examples/nas/cream/requirements.txt @@ -3,9 +3,9 @@ numpy==1.17 opencv-python==4.0.1.24 torchvision==0.2.1 thop -ptflops +git+https://github.com/sovrasov/flops-counter.pytorch.git pillow==6.1.0 torch==1.2 timm==0.1.20 -torchscope +git+https://github.com/Tramac/torchscope.git tensorboard From cda252bf284c6343ce68a87906e0bac31fac0a4c Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:14:41 +0800 Subject: [PATCH 09/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index b62e54ef1a..eeaddb5017 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,7 +8,7 @@ paths is able to boost the training of subnetworks. Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. 
Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. -For more details, pls refer to the [Paper] (https://github.com/microsoft/nni). +For more details, pls refer to the [Paper](https://github.com/microsoft/nni). ## Reproduction Results Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. From 29b6d5d92f0e72038a7921bc6e97f0170a403b19 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:24:01 +0800 Subject: [PATCH 10/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index eeaddb5017..352f535600 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,17 +8,8 @@ paths is able to boost the training of subnetworks. Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. -For more details, pls refer to the [Paper](https://github.com/microsoft/nni). ## Reproduction Results -Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. - -| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | -| ---- |:-------------:| :-----:| -| 42M | 65.8 | 66.5 | -| 112M | 72.1 | 72.8 | -| 470M | 78.9 | 79.2 | -| 600M | 79.4 | 80.0 | ## Examples @@ -28,15 +19,15 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t * python >= 3.6 * torch >= 1.2 * torchscope -* apex +* apex (not necessary, please make sure your nvcc CUDA version is the same with pytorch CUDA verision) ## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: -Put the imagenet data in ./examples/nas/cream/data. It should be like following: +Put the imagenet data in ${Root}/data. It should be like following: ```buildoutcfg -./examples/nas/cream/data/imagenet/train -./examples/nas/cream/data/imagenet/val +${Root}/data/imagenet/train +${Root}/data/imagenet/val ... ``` @@ -50,7 +41,7 @@ First, build environments for searching. pip install -r ./examples/nas/cream/requirements.txt ``` -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. 
You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` +To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/supernet.sh` ```buildoutcfg --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture @@ -58,7 +49,7 @@ To search for an architecture, you need to configure the parameters `flops_minim After you specify the flops of the architectures you would like to search, you can search an architecture now by running: ```buildoutcfg -sh ./examples/nas/cream/run.sh +sh ./experiments/scripts/supernet.sh ``` @@ -72,17 +63,15 @@ To test our trained of models, you need to use `model_selection` in `./examples/ After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. ```buildoutcfg ---resume './examples/nas/cream/experiments/ckps/42.pth.tar' ---resume './examples/nas/cream/experiments/ckps/470.pth.tar' +--resume './experiments/ckps/42.pth.tar' +--resume './experiments/ckps/470.pth.tar' ...... ``` We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. +After downloading the pretrained models and adding `--model_selection` and `--resume` in './experiments/scripts/test.sh', you need to use the following command to test the model. ```buildoutcfg -sh ./examples/nas/cream/test.sh +sh ./experiments/scripts/test.sh ``` The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. - - From e548cbc372538290b4d1cd4e341c12ce8b66308d Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:30:01 +0800 Subject: [PATCH 11/62] Update Cream.md --- examples/nas/cream/Cream.md | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md index cd38d38542..352f535600 100644 --- a/examples/nas/cream/Cream.md +++ b/examples/nas/cream/Cream.md @@ -75,20 +75,3 @@ sh ./experiments/scripts/test.sh ``` The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. - - -### PyTorch - -```eval_rst -.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer - :members: - -.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator - :members: - -.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator - :members: - -.. 
autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel - :members: -``` \ No newline at end of file From d5c95c6ac62540d49b95165a1922f409293fd602 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 31 Jul 2020 20:38:45 +0800 Subject: [PATCH 12/62] Update Cream.md --- examples/nas/cream/Cream.md | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md index 352f535600..eeaddb5017 100644 --- a/examples/nas/cream/Cream.md +++ b/examples/nas/cream/Cream.md @@ -8,8 +8,17 @@ paths is able to boost the training of subnetworks. Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. +For more details, pls refer to the [Paper](https://github.com/microsoft/nni). ## Reproduction Results +Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. + +| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | +| ---- |:-------------:| :-----:| +| 42M | 65.8 | 66.5 | +| 112M | 72.1 | 72.8 | +| 470M | 78.9 | 79.2 | +| 600M | 79.4 | 80.0 | ## Examples @@ -19,15 +28,15 @@ settings. Moreover, the experiments on object detection and more challenging sea * python >= 3.6 * torch >= 1.2 * torchscope -* apex (not necessary, please make sure your nvcc CUDA version is the same with pytorch CUDA verision) +* apex ## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: -Put the imagenet data in ${Root}/data. It should be like following: +Put the imagenet data in ./examples/nas/cream/data. It should be like following: ```buildoutcfg -${Root}/data/imagenet/train -${Root}/data/imagenet/val +./examples/nas/cream/data/imagenet/train +./examples/nas/cream/data/imagenet/val ... ``` @@ -41,7 +50,7 @@ First, build environments for searching. pip install -r ./examples/nas/cream/requirements.txt ``` -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/supernet.sh` +To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. 
You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` ```buildoutcfg --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture @@ -49,7 +58,7 @@ To search for an architecture, you need to configure the parameters `flops_minim After you specify the flops of the architectures you would like to search, you can search an architecture now by running: ```buildoutcfg -sh ./experiments/scripts/supernet.sh +sh ./examples/nas/cream/run.sh ``` @@ -63,15 +72,17 @@ To test our trained of models, you need to use `model_selection` in `./examples/ After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. ```buildoutcfg ---resume './experiments/ckps/42.pth.tar' ---resume './experiments/ckps/470.pth.tar' +--resume './examples/nas/cream/experiments/ckps/42.pth.tar' +--resume './examples/nas/cream/experiments/ckps/470.pth.tar' ...... ``` We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './experiments/scripts/test.sh', you need to use the following command to test the model. +After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. ```buildoutcfg -sh ./experiments/scripts/test.sh +sh ./examples/nas/cream/test.sh ``` The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. + + From c73b95c6974618765b43f39b5bc2b3662e8ef336 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Sat, 1 Aug 2020 10:32:53 +0800 Subject: [PATCH 13/62] Update trainer.py --- .../pynni/nni/nas/pytorch/cream/trainer.py | 47 ++++++++++--------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py index a915583fb5..1e43f92bab 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -3,19 +3,19 @@ import os import logging - +from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed as dist -from copy import deepcopy from nni.nas.pytorch.trainer import Trainer from nni.nas.pytorch.utils import AverageMeterGroup -from .mutator import CreamSupernetTrainingMutator +#from .mutator import CreamSupernetTrainingMutator logger = logging.getLogger(__name__) + class CreamSupernetTrainer(Trainer): """ This trainer trains a supernet that can be used for evolution search. 
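The hunks that follow reshuffle the meta-update code that PATCH 01 introduced: a differentiable "virtual" SGD step on the sampled path (`raw_sgd`), a held-out loss evaluated at the updated weights, and a `grad_outputs` chain that sends the held-out gradient back to the prioritized-path (teacher) side. A stripped-down sketch of that autograd pattern with scalar stand-ins, meant as an illustration of the mechanism rather than the NNI implementation:

```python
import torch

w = torch.tensor([1.0, -0.5], requires_grad=True)  # stands in for the sampled path's weights
alpha = torch.tensor(0.3, requires_grad=True)       # stands in for a teacher-side / meta parameter
lr = 0.1

kd_loss = alpha * (w ** 2).sum()                     # distillation loss that depends on the teacher
grad_w = torch.autograd.grad(kd_loss, w, create_graph=True)[0]
w_virtual = grad_w * lr + w                          # the differentiable "raw_sgd" step

valid_loss = ((w_virtual - 1.0) ** 2).sum()          # held-out loss at the virtually updated weights
d_valid_d_wv = torch.autograd.grad(valid_loss, w_virtual, retain_graph=True)[0]

# Vector-Jacobian product via grad_outputs:
#   d(valid_loss)/d(alpha) = d(valid_loss)/d(w_virtual) * d(w_virtual)/d(alpha)
grad_alpha = torch.autograd.grad(w_virtual, alpha, grad_outputs=d_valid_d_wv)[0]
print(grad_alpha)
```

In the patch itself the student step is applied through the optimizer and the held-out gradient is taken against the live parameters, so the chaining there approximates the clean version above.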
@@ -109,25 +109,25 @@ def accuracy(self, output, target, topk=(1,)): def train_one_epoch(self, epoch): def get_model(model): - try: - return model.module - except: - return model + #try: + return model.module + #except: + # return model meters = AverageMeterGroup() - for step, (input, target) in enumerate(self.train_loader): + for step, (input_data, target) in enumerate(self.train_loader): self.optimizer.zero_grad() self.mutator.reset() - input = input.cuda() + input_data = input_data.cuda() target = target.cuda() cand_flops = self.est.get_flops(self.mutator._cache) if epoch > self.meta_sta_epoch and step > 0 and step % self.update_iter == 0: - slice = self.slices - x = deepcopy(input[:slice].clone().detach()) + slice_ind = self.slices + x = deepcopy(input_data[:slice_ind].clone().detach()) if len(self.best_children_pool) > 0: if self.pick_method == 'top1': @@ -178,9 +178,9 @@ def raw_sgd(w, g): for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): del weight.grad - held_out_x = input[slice:slice * 2].clone() + held_out_x = input_data[slice_ind:slice_ind * 2].clone() output_2 = self.model(held_out_x) - valid_loss = self.loss(output_2, target[slice:slice * 2]) + valid_loss = self.loss(output_2, target[slice_ind:slice_ind * 2]) self.optimizer.zero_grad() grad_student_val = torch.autograd.grad(valid_loss, @@ -189,18 +189,20 @@ def raw_sgd(w, g): grad_teacher = torch.autograd.grad(students_weight[0], get_model(self.model).rand_parameters(cand, - self.pick_method == 'meta'), + self.pick_method == 'meta'), grad_outputs=grad_student_val) # update teacher model - for weight, grad_item in zip(get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), - grad_teacher): + for weight, grad_item in zip( + get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), + grad_teacher): weight.grad = grad_item torch.nn.utils.clip_grad_norm_( get_model(self.model).rand_parameters(self.mutator._cache, self.pick_method == 'meta'), 1) self.optimizer.step() - for weight, grad_item in zip(get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), - grad_teacher): + for weight, grad_item in zip( + get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), + grad_teacher): del weight.grad for item in students_weight: @@ -266,10 +268,10 @@ def raw_sgd(w, g): # best_children_pool = sorted(best_children_pool, reverse=True) if epoch > self.meta_sta_epoch and ( - (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1][1] + 5) or ( - prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): + (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1][1] + 5) or + (prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): val_prec1 = prec1 - training_data = deepcopy(input[:self.slices].detach()) + training_data = deepcopy(input_data[:self.slices].detach()) if len(self.best_children_pool) == 0: features = deepcopy(output[:self.slices].detach()) else: @@ -292,7 +294,8 @@ def raw_sgd(w, g): if self.main_proc: for idx, i in enumerate(self.best_children_pool): - logger.info("No.{} {}".format(idx, i[:4])) + logger.info("No.%s %s", idx, i[:4]) + #logger.info("No.{} {}".format(idx, i[:4])) def validate_one_epoch(self, epoch): self.model.eval() From 0adaf7cee2133b367807f3cbe7258ec792d01597 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Sat, 1 Aug 2020 10:34:19 
+0800 Subject: [PATCH 14/62] Update mutator.py --- src/sdk/pynni/nni/nas/pytorch/cream/mutator.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py index f87ba40307..7e1182cafc 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py @@ -3,9 +3,9 @@ import logging -import numpy as np +#import numpy as np -from nni.nas.pytorch.mutables import LayerChoice, InputChoice +#from nni.nas.pytorch.mutables import LayerChoice, InputChoice from nni.nas.pytorch.random import RandomMutator _logger = logging.getLogger(__name__) @@ -32,7 +32,9 @@ class CreamSupernetTrainingMutator(RandomMutator): flops_sample_timeout : int Maximum number of attempts to sample before giving up and use a random candidate. """ - def __init__(self, model, how_to_prob='even', pre_prob=(0.05,0.05,0.2,0.4,0.2,0.1), CHOICE_NUM=6, sta_num=(4,4,4,4,4)): + + def __init__(self, model, how_to_prob='even', pre_prob=(0.05, 0.05, 0.2, 0.4, 0.2, 0.1), CHOICE_NUM=6, + sta_num=(4, 4, 4, 4, 4)): super().__init__(model) self.how_to_prob = how_to_prob From 85f01e908c07a526498c3dafe24e43def95b63c3 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 3 Aug 2020 11:29:32 +0800 Subject: [PATCH 15/62] Update Cream.md --- examples/nas/cream/Cream.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md index eeaddb5017..c9184cbab0 100644 --- a/examples/nas/cream/Cream.md +++ b/examples/nas/cream/Cream.md @@ -15,8 +15,10 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | | ---- |:-------------:| :-----:| +| 14M | testing | testing | | 42M | 65.8 | 66.5 | -| 112M | 72.1 | 72.8 | +| 114M | 72.1 | 72.8 | +| 285M | 76.7 | 77.6 | | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | From 047fd86ffef512a7143601bfc43abad6ce123318 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 3 Aug 2020 11:29:41 +0800 Subject: [PATCH 16/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 352f535600..c9184cbab0 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,8 +8,19 @@ paths is able to boost the training of subnetworks. Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. +For more details, pls refer to the [Paper](https://github.com/microsoft/nni). ## Reproduction Results +Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. 
+ +| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | +| ---- |:-------------:| :-----:| +| 14M | testing | testing | +| 42M | 65.8 | 66.5 | +| 114M | 72.1 | 72.8 | +| 285M | 76.7 | 77.6 | +| 470M | 78.9 | 79.2 | +| 600M | 79.4 | 80.0 | ## Examples @@ -19,15 +30,15 @@ settings. Moreover, the experiments on object detection and more challenging sea * python >= 3.6 * torch >= 1.2 * torchscope -* apex (not necessary, please make sure your nvcc CUDA version is the same with pytorch CUDA verision) +* apex ## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: -Put the imagenet data in ${Root}/data. It should be like following: +Put the imagenet data in ./examples/nas/cream/data. It should be like following: ```buildoutcfg -${Root}/data/imagenet/train -${Root}/data/imagenet/val +./examples/nas/cream/data/imagenet/train +./examples/nas/cream/data/imagenet/val ... ``` @@ -41,7 +52,7 @@ First, build environments for searching. pip install -r ./examples/nas/cream/requirements.txt ``` -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/supernet.sh` +To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` ```buildoutcfg --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture @@ -49,7 +60,7 @@ To search for an architecture, you need to configure the parameters `flops_minim After you specify the flops of the architectures you would like to search, you can search an architecture now by running: ```buildoutcfg -sh ./experiments/scripts/supernet.sh +sh ./examples/nas/cream/run.sh ``` @@ -63,15 +74,17 @@ To test our trained of models, you need to use `model_selection` in `./examples/ After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. ```buildoutcfg ---resume './experiments/ckps/42.pth.tar' ---resume './experiments/ckps/470.pth.tar' +--resume './examples/nas/cream/experiments/ckps/42.pth.tar' +--resume './examples/nas/cream/experiments/ckps/470.pth.tar' ...... ``` We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './experiments/scripts/test.sh', you need to use the following command to test the model. +After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. ```buildoutcfg -sh ./experiments/scripts/test.sh +sh ./examples/nas/cream/test.sh ``` The test result will be saved in `./retrain`. 
You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. + + From b22f92c2056c3701d01e586d0305e612f67cfb03 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 3 Aug 2020 11:39:28 +0800 Subject: [PATCH 17/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index c9184cbab0..1e46b3fc1f 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -15,7 +15,7 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | | ---- |:-------------:| :-----:| -| 14M | testing | testing | +| 14M | testing | 59.6 | | 42M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 285M | 76.7 | 77.6 | From 3ee7591f484473bde480fedaf462f9bc41fec427 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 3 Aug 2020 11:39:35 +0800 Subject: [PATCH 18/62] Update Cream.md --- examples/nas/cream/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md index c9184cbab0..1e46b3fc1f 100644 --- a/examples/nas/cream/Cream.md +++ b/examples/nas/cream/Cream.md @@ -15,7 +15,7 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | | ---- |:-------------:| :-----:| -| 14M | testing | testing | +| 14M | testing | 59.6 | | 42M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 285M | 76.7 | 77.6 | From be81d53f5c263560ff5d347ea7734911c766b109 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 3 Aug 2020 22:40:58 +0800 Subject: [PATCH 19/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 1e46b3fc1f..ec628d41bb 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -57,6 +57,7 @@ To search for an architecture, you need to configure the parameters `flops_minim --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture ``` +For example, if you expect to search an architecture with model Flops <= 200M, pls set the `flops_minimum` and `flops_maximum` to be `0` and `200`. After you specify the flops of the architectures you would like to search, you can search an architecture now by running: ```buildoutcfg From 95354668bc362e5685ec31d86af160c6e82a5400 Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Tue, 4 Aug 2020 14:59:10 +0800 Subject: [PATCH 20/62] Fix pipeline for merging into NNI --- docs/en_US/NAS/Cream.md | 61 ++++++++----- docs/en_US/NAS/one_shot_nas.rst | 3 +- examples/nas/cream/Cream.md | 91 +------------------ examples/nas/cream/run.sh | 10 +- examples/nas/cream/test.sh | 2 +- .../pynni/nni/nas/pytorch/cream/__init__.py | 3 +- .../pynni/nni/nas/pytorch/cream/mutator.py | 7 +- .../pynni/nni/nas/pytorch/cream/trainer.py | 19 ++-- 8 files changed, 54 insertions(+), 142 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index ec628d41bb..6d16016799 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,7 +8,7 @@ paths is able to boost the training of subnetworks. 
Since the prioritized paths one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. -For more details, pls refer to the [Paper](https://github.com/microsoft/nni). +For more details, please refer to the paper (coming soon). ## Reproduction Results Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. @@ -22,70 +22,81 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | -## Examples - -[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) - ## Requirements * python >= 3.6 * torch >= 1.2 * torchscope * apex -## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: +## Examples + +[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) + +Please run the following scripts in the example folder. + +## Data Preparation + +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: + +Put the imagenet data in `./data`. It should be like following: -Put the imagenet data in ./examples/nas/cream/data. It should be like following: ```buildoutcfg -./examples/nas/cream/data/imagenet/train -./examples/nas/cream/data/imagenet/val +./data/imagenet/train +./data/imagenet/val ... ``` - ## Quick Start ### I. Search First, build environments for searching. + ``` -pip install -r ./examples/nas/cream/requirements.txt +pip install -r ./requirements.txt ``` -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` +To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./run.sh` + ```buildoutcfg --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture ``` -For example, if you expect to search an architecture with model Flops <= 200M, pls set the `flops_minimum` and `flops_maximum` to be `0` and `200`. + +For example, if you expect to search an architecture with model Flops <= 200M, please set the `flops_minimum` and `flops_maximum` to be `0` and `200`. 
After you specify the flops of the architectures you would like to search, you can search an architecture now by running: -```buildoutcfg -sh ./examples/nas/cream/run.sh +```buildoutcfg +sh ./run.sh ``` +Searched model needs to be retrained to obtain the final model. Retraining code will be released soon. + ### II. Test -To test our trained of models, you need to use `model_selection` in `./examples/nas/cream/test.sh` to specify which model to test. + +To test our trained of models, you need to use `model_selection` in `./test.sh` to specify which model to test. + ```buildoutcfg --model_selection 42 # test 42m model --model_selection 470 # test 470m model ...... ``` -After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. +After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. + ```buildoutcfg ---resume './examples/nas/cream/experiments/ckps/42.pth.tar' ---resume './examples/nas/cream/experiments/ckps/470.pth.tar' +--resume './data/ckpts/42.pth.tar' +--resume './data/ckpts/470.pth.tar' ...... ``` We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. -```buildoutcfg -sh ./examples/nas/cream/test.sh -``` -The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. +After downloading the pretrained models and adding `--model_selection` and `--resume` in './test.sh', you need to use the following command to test the model. +```buildoutcfg +sh ./test.sh +``` +The test result will be saved in `./retrain`. You can configure the `--output` in `./test.sh` to specify a path for it. diff --git a/docs/en_US/NAS/one_shot_nas.rst b/docs/en_US/NAS/one_shot_nas.rst index cc7fa688b6..77b3cfcc94 100644 --- a/docs/en_US/NAS/one_shot_nas.rst +++ b/docs/en_US/NAS/one_shot_nas.rst @@ -14,4 +14,5 @@ One-shot NAS algorithms leverage weight sharing among models in neural architect SPOS CDARTS ProxylessNAS - TextNAS \ No newline at end of file + TextNAS + Cream diff --git a/examples/nas/cream/Cream.md b/examples/nas/cream/Cream.md index 1e46b3fc1f..a871bddf78 100644 --- a/examples/nas/cream/Cream.md +++ b/examples/nas/cream/Cream.md @@ -1,90 +1 @@ -# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search - -## Introduction -One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training -of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training -process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized -paths is able to boost the training of subnetworks. 
Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising -one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the -convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned -settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. -For more details, pls refer to the [Paper](https://github.com/microsoft/nni). - -## Reproduction Results -Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. - -| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | -| ---- |:-------------:| :-----:| -| 14M | testing | 59.6 | -| 42M | 65.8 | 66.5 | -| 114M | 72.1 | 72.8 | -| 285M | 76.7 | 77.6 | -| 470M | 78.9 | 79.2 | -| 600M | 79.4 | 80.0 | - -## Examples - -[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) - -## Requirements -* python >= 3.6 -* torch >= 1.2 -* torchscope -* apex - -## Data Preparation -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./examples/nas/cream/data/imagenet` and move the validation set to the subfolder `./examples/nas/cream/data/imagenet/val`. To move the validation set, you cloud use the following script: - -Put the imagenet data in ./examples/nas/cream/data. It should be like following: -```buildoutcfg -./examples/nas/cream/data/imagenet/train -./examples/nas/cream/data/imagenet/val -... -``` - - -## Quick Start - -### I. Search - -First, build environments for searching. -``` -pip install -r ./examples/nas/cream/requirements.txt -``` - -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./examples/nas/cream/run.sh` -```buildoutcfg ---flops_minimum 0 # Minimum Flops of Architecture ---flops_maximum 600 # Maximum Flops of Architecture -``` - -After you specify the flops of the architectures you would like to search, you can search an architecture now by running: -```buildoutcfg -sh ./examples/nas/cream/run.sh - -``` - -### II. Test -To test our trained of models, you need to use `model_selection` in `./examples/nas/cream/test.sh` to specify which model to test. -```buildoutcfg ---model_selection 42 # test 42m model ---model_selection 470 # test 470m model -...... -``` - -After specifying the flops of the model, you need to write the path to the resume model in `./examples/nas/cream/test.sh`. -```buildoutcfg ---resume './examples/nas/cream/experiments/ckps/42.pth.tar' ---resume './examples/nas/cream/experiments/ckps/470.pth.tar' -...... -``` - -We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). -After downloading the pretrained models and adding `--model_selection` and `--resume` in './examples/nas/cream/test.sh', you need to use the following command to test the model. 
-```buildoutcfg -sh ./examples/nas/cream/test.sh -``` - -The test result will be saved in `./retrain`. You can configure the `--ouput` in `./examples/nas/cream/test.sh` to specify a path for it. - - +[Documentation](https://nni.readthedocs.io/en/latest/NAS/Cream.html) diff --git a/examples/nas/cream/run.sh b/examples/nas/cream/run.sh index 35c57ebe06..c91122daa4 100755 --- a/examples/nas/cream/run.sh +++ b/examples/nas/cream/run.sh @@ -1,6 +1,6 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_train.sh 8 \ ---data ../NIPS20_release/data/imagenet/ --sched spos_linear \ ---pool_size 10 --meta_sta_epoch 20 --update_iter 200 \ ---epochs 120 --batch-size 128 --warmup-epochs 0 \ ---lr 0.5 --opt-eps 0.001 \ ---color-jitter 0.06 --drop 0. -j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 + --data ./data/imagenet/ --sched spos_linear \ + --pool_size 10 --meta_sta_epoch 20 --update_iter 200 \ + --epochs 120 --batch-size 128 --warmup-epochs 0 \ + --lr 0.5 --opt-eps 0.001 \ + --color-jitter 0.06 --drop 0. -j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 diff --git a/examples/nas/cream/test.sh b/examples/nas/cream/test.sh index a257ba5ea4..627d187b92 100755 --- a/examples/nas/cream/test.sh +++ b/examples/nas/cream/test.sh @@ -1,2 +1,2 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_test.sh 8 \ ---data ~/data_local/imagenet --model_selection 285 --resume ~/data_local/nips_ckp/285m/model_best.pth.tar # 0.06 --drop 0. -j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 + --data ./data/imagenet --model_selection 285 --resume ./data/ckpts/285.pth.tar diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py index d7a61ddad9..2429e996f3 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -# from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator from .trainer import CreamSupernetTrainer -from .mutator import CreamSupernetTrainingMutator \ No newline at end of file +from .mutator import CreamSupernetTrainingMutator diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py index 7e1182cafc..37f154a1e9 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py @@ -3,9 +3,6 @@ import logging -#import numpy as np - -#from nni.nas.pytorch.mutables import LayerChoice, InputChoice from nni.nas.pytorch.random import RandomMutator _logger = logging.getLogger(__name__) @@ -20,7 +17,7 @@ class CreamSupernetTrainingMutator(RandomMutator): model : nn.Module PyTorch model. flops_func : callable - Callable that takes a candidate from `sample_search` and returns its candidate. When `flops_func` + Callable that takes a candidate from ``sample_search`` and returns its candidate. When ``flops_func`` is None, functions related to flops will be deactivated. flops_lb : number Lower bound of flops. @@ -52,7 +49,7 @@ def get_prob(self): def sample_search(self): """ - Sample a candidate for training. When `flops_func` is not None, candidates will be sampled uniformly + Sample a candidate for training. When ``flops_func`` is not None, candidates will be sampled uniformly relative to flops. 
Returns diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py index 1e43f92bab..0ff43b6509 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -11,8 +11,6 @@ from nni.nas.pytorch.trainer import Trainer from nni.nas.pytorch.utils import AverageMeterGroup -#from .mutator import CreamSupernetTrainingMutator - logger = logging.getLogger(__name__) @@ -85,7 +83,7 @@ def __init__(self, model, loss, def cross_entropy_loss_with_soft_target(self, pred, soft_target): logsoftmax = nn.LogSoftmax() - return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1)) def reduce_tensor(self, tensor): rt = tensor.clone() @@ -109,10 +107,7 @@ def accuracy(self, output, target, topk=(1,)): def train_one_epoch(self, epoch): def get_model(model): - #try: return model.module - #except: - # return model meters = AverageMeterGroup() for step, (input_data, target) in enumerate(self.train_loader): @@ -129,7 +124,7 @@ def get_model(model): slice_ind = self.slices x = deepcopy(input_data[:slice_ind].clone().detach()) - if len(self.best_children_pool) > 0: + if self.best_children_pool: if self.pick_method == 'top1': meta_value, cand = 1, sorted(self.best_children_pool, reverse=True)[0][3] elif self.pick_method == 'meta': @@ -214,7 +209,7 @@ def raw_sgd(w, g): raise ValueError("Must 1nd or 2nd update teacher weights") # get_best_teacher - if len(self.best_children_pool) > 0: + if self.best_children_pool: if self.pick_method == 'top1': meta_value, cand = 0.5, sorted(self.best_children_pool, reverse=True)[0][3] elif self.pick_method == 'meta': @@ -224,7 +219,7 @@ def raw_sgd(w, g): output = F.softmax(self.model(inputx), dim=1) weight = get_model(self.model).forward_meta(output - item[4]) if weight > meta_value: - meta_value = weight # deepcopy(torch.nn.functional.sigmoid(weight)) + meta_value = weight cand_idx = now_idx cand = self.arch_dict[(self.best_children_pool[cand_idx][0], self.best_children_pool[cand_idx][2])] @@ -233,7 +228,7 @@ def raw_sgd(w, g): else: raise ValueError('Method Not supported') - if len(self.best_children_pool) == 0: + if not self.best_children_pool: output = self.model(input) loss = self.loss(output, target) kd_loss = loss @@ -266,13 +261,12 @@ def raw_sgd(w, g): metrics = self.reduce_metrics(metrics, self.distributed) meters.update(metrics) - # best_children_pool = sorted(best_children_pool, reverse=True) if epoch > self.meta_sta_epoch and ( (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1][1] + 5) or (prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): val_prec1 = prec1 training_data = deepcopy(input_data[:self.slices].detach()) - if len(self.best_children_pool) == 0: + if self.best_children_pool: features = deepcopy(output[:self.slices].detach()) else: features = deepcopy(teacher_output[:self.slices].detach()) @@ -295,7 +289,6 @@ def raw_sgd(w, g): if self.main_proc: for idx, i in enumerate(self.best_children_pool): logger.info("No.%s %s", idx, i[:4]) - #logger.info("No.{} {}".format(idx, i[:4])) def validate_one_epoch(self, epoch): self.model.eval() From 0892e667c0d26a85ffb08380a455e78dfead5ed8 Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Tue, 4 Aug 2020 15:04:13 +0800 Subject: [PATCH 21/62] Fix typo --- src/sdk/pynni/nni/nas/pytorch/cream/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py index 0ff43b6509..b6a28340d4 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -266,7 +266,7 @@ def raw_sgd(w, g): (prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): val_prec1 = prec1 training_data = deepcopy(input_data[:self.slices].detach()) - if self.best_children_pool: + if not self.best_children_pool: features = deepcopy(output[:self.slices].detach()) else: features = deepcopy(teacher_output[:self.slices].detach()) From 2e13a23c469f5a0742a19f630360a852f4767a85 Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Wed, 5 Aug 2020 11:21:48 +0800 Subject: [PATCH 22/62] Fix pipeline --- docs/en_US/NAS/Cream.md | 12 ++++++------ examples/nas/cream/dataset/loader.py | 1 + examples/nas/cream/utils/flops_table.py | 4 ++-- examples/nas/cream/utils/scheduler.py | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 6d16016799..8a86002628 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -40,7 +40,7 @@ You need to first download the [ImageNet-2012](http://www.image-net.org/) to the Put the imagenet data in `./data`. It should be like following: -```buildoutcfg +``` ./data/imagenet/train ./data/imagenet/val ... @@ -58,7 +58,7 @@ pip install -r ./requirements.txt To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./run.sh` -```buildoutcfg +``` --flops_minimum 0 # Minimum Flops of Architecture --flops_maximum 600 # Maximum Flops of Architecture ``` @@ -67,7 +67,7 @@ For example, if you expect to search an architecture with model Flops <= 200M, p After you specify the flops of the architectures you would like to search, you can search an architecture now by running: -```buildoutcfg +``` sh ./run.sh ``` @@ -77,7 +77,7 @@ Searched model needs to be retrained to obtain the final model. Retraining code To test our trained of models, you need to use `model_selection` in `./test.sh` to specify which model to test. -```buildoutcfg +``` --model_selection 42 # test 42m model --model_selection 470 # test 470m model ...... @@ -85,7 +85,7 @@ To test our trained of models, you need to use `model_selection` in `./test.sh` After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. -```buildoutcfg +``` --resume './data/ckpts/42.pth.tar' --resume './data/ckpts/470.pth.tar' ...... @@ -95,7 +95,7 @@ We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https After downloading the pretrained models and adding `--model_selection` and `--resume` in './test.sh', you need to use the following command to test the model. 
-```buildoutcfg +``` sh ./test.sh ``` diff --git a/examples/nas/cream/dataset/loader.py b/examples/nas/cream/dataset/loader.py index fa6199b200..7f00135879 100755 --- a/examples/nas/cream/dataset/loader.py +++ b/examples/nas/cream/dataset/loader.py @@ -119,6 +119,7 @@ def __init__(self, def __iter__(self): stream = torch.cuda.Stream() first = True + input = target = None for next_input, next_target in self.loader: with torch.cuda.stream(stream): diff --git a/examples/nas/cream/utils/flops_table.py b/examples/nas/cream/utils/flops_table.py index b46476290b..b4d60d849e 100755 --- a/examples/nas/cream/utils/flops_table.py +++ b/examples/nas/cream/utils/flops_table.py @@ -54,7 +54,7 @@ def __init__(self, model, input_shape=(1, 3, 224, 224), device='cpu'): def get_params(self, arch): params = 0 for block_id, block in enumerate(arch.keys()): - if block is 'LayerChoice1' or block is 'LayerChoice23': + if block == 'LayerChoice1' or block == 'LayerChoice23': continue for idx, choice in enumerate(arch[block]): params += self.params_dict[block_id][idx] * (choice is True) @@ -64,7 +64,7 @@ def get_params(self, arch): def get_flops(self, arch): flops = 0 for block_id, block in enumerate(arch.keys()): - if block is 'LayerChoice1' or block_id is 'LayerChoice23': + if block == 'LayerChoice1' or block_id == 'LayerChoice23': continue for idx, choice in enumerate(arch[block]): flops += self.flops_dict[block_id][idx] * (1 if choice else 0) diff --git a/examples/nas/cream/utils/scheduler.py b/examples/nas/cream/utils/scheduler.py index 8826bd8b22..172f20455a 100755 --- a/examples/nas/cream/utils/scheduler.py +++ b/examples/nas/cream/utils/scheduler.py @@ -31,7 +31,7 @@ def __init__(self, initialize: bool = True) -> None: self.optimizer = optimizer self.param_group_field = param_group_field - self._initial_param_group_field = f"initial_{param_group_field}" + self._initial_param_group_field = "initial_" + param_group_field if initialize: for i, group in enumerate(self.optimizer.param_groups): if param_group_field not in group: From 22a3f46f699daba720f9ab7f07f777e2430d22cd Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 15:44:03 +0800 Subject: [PATCH 23/62] Add files via upload --- docs/img/cream.jpg | Bin 0 -> 98959 bytes docs/img/cream_flops100.jpg | Bin 0 -> 105849 bytes docs/img/cream_flops600.jpg | Bin 0 -> 106935 bytes 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/img/cream.jpg create mode 100644 docs/img/cream_flops100.jpg create mode 100644 docs/img/cream_flops600.jpg diff --git a/docs/img/cream.jpg b/docs/img/cream.jpg new file mode 100644 index 0000000000000000000000000000000000000000..715472b1deaf95a771399649c2efdb9bf0821bcc GIT binary patch literal 98959 zcmeFZ2Urx(wlCVl3=BEvER2#RDLEraRDwh$Nk(!GB14cUl7k|WGf2)k2SK6)Ns^-= zIS$Mn_kZuR^}o;A`@DPaci(&W+dW@*S9e!e)vB&q>sKobatgTs5Z;$pk_SL&AfOlZ z1CUGDLQ1l-hN|i+@=6cn01yZOuvwL5A3jC{0KnGH#YtU3hEYfN2_tj{fB-nC9~&U@ z%*5H@;bVCXR2JZ#+=B-{&%bXc;{d8802t#?R=Fqr`?G&}g>B;C$;!ggg^|^SjggOsmzVLLnT-vjlAX!zpU;2x$ImkW zoXq}}P7xDVC$~TDL4Kal0c?N(AO)xZdVm>V1-Jk{KoAfEqyc$A8Bhf@fhT|gU;L0WLux z5EO(5A^}l>7(h2c93VcB5J&3Y7za!az5(U}3xQ?9D&QwzbFdTG2OI`Y0B3-U!S&!C@ECXj z{2hD_!GMrJ7$IB`QHUZ$6Ji2!g!n+hAxV%tNHwGrG6q?M>_M*4@z80}InYJWmC$w2 zEzsT3gU}PubI_~MyU@qcSJ6)}Ffhn5STF=J6ftx$o@2bg2*>zC`%qN(3nEsdvm<5>4n4_5Mn3q_D zSU0gmuvD;2u{^ONv9hr0v4*f#u`aQRuvxIhu{E%tWBXyh#V*F~!k)!G#KFN~!V$(% z!?D8g#d(WUiqng;h;xohjLVKIi>rt0iW`BOi`$C(755Mi5APP96y6g&7rY3(e7p|4 
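The `CreamSupernetTrainingMutator` docstring earlier in this series says that when a `flops_func` is supplied, `sample_search` keeps the sampled candidates inside the `[flops_lb, flops_ub]` window. As a rough standalone sketch of that filtering step (the `sample_fn` / `flops_fn` names are illustrative placeholders, not the NNI API, and the real mutator may weight its sampling differently):

```python
def sample_within_flops(sample_fn, flops_fn, flops_lb, flops_ub, max_tries=100):
    """Rejection-sample a candidate whose estimated flops fall in [flops_lb, flops_ub].

    sample_fn stands in for the random mutator's uniform sampling and flops_fn for
    the user-supplied flops_func; both names are illustrative only.
    """
    cand = sample_fn()
    if flops_fn is None:
        return cand  # flops filtering is disabled when no flops_func is given
    for _ in range(max_tries):
        if flops_lb <= flops_fn(cand) <= flops_ub:
            return cand
        cand = sample_fn()
    return cand  # give up after max_tries and keep the last draw
```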
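Similarly, the `best_children_pool` update in the trainer hunks above only admits a sampled child once `epoch > meta_sta_epoch`, and only if the pool still has room, the child beats the current worst entry by a clear margin, or it is more accurate while needing fewer flops. A minimal sketch of that admission rule, using a simplified `(prec1, flops, payload)` tuple rather than the trainer's internal entry format:

```python
def maybe_admit(pool, pool_size, prec1, flops, payload):
    """Keep a fixed-size pool of the best child networks, sorted best-first.

    Mirrors the condition in the trainer diff: admit when the pool is not full,
    when prec1 beats the worst entry by 5 points, or when prec1 is higher and the
    candidate uses fewer flops. The tuple layout here is simplified for
    illustration and is not the trainer's actual format.
    """
    if (len(pool) < pool_size
            or prec1 > pool[-1][0] + 5
            or (prec1 > pool[-1][0] and flops < pool[-1][1])):
        pool.append((prec1, flops, payload))
        pool.sort(key=lambda entry: entry[0], reverse=True)  # best child first
        del pool[pool_size:]  # drop entries beyond pool_size
```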
z4;5DP6f8}onx|^U_PKbDHB2%38Idu~Ba#jTmFD}#lYI=aF{gT^$bN{kL_rQEF(dh*A$XeY_) zb3AxbkD8sA`!YrIW+;SQzvsX1gyYd{DpqE&F5Pq3AKu#kG2N87b>uZuu+ z1!)iElvFddiJlHBxv-d;=opF{gfBgS^{*2ssT?NsNbsTvAHsuS z)Z5_y_umvytA=dm@4s=UBn=D%vGlZhgQAi=ry@9XR~UWO9BDTrfabgqb6$qC2>*et zy3&?Pl(P+_9%5Tc3+b6Y^XGM7wG|QGhqC|`?_5x+6Mk`oOj{y1H>4)1Sa>@#1|zCY zCr%R#kin|5({91qJGZTAhR8A_5*NC>n(cv`^3rNp~fblI{6io~G-Fh^DfNFH(UX5~!9G^W1gXIN>; z#Kg6UQ3*r@L3&tT0}dYQd&*Yf6J_C_WhZhh6hr5Wzh0mjGwklX*Hh#X8BG=d literal 0 HcmV?d00001 diff --git a/docs/img/cream_flops100.jpg b/docs/img/cream_flops100.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a31078dd8f1ab0aa02b92986982a6e569b086fd8 GIT binary patch literal 105849 zcmeFY1ymhdwk}*7cXtR7T!Xtq@Swpx0fM{hMgmE2C&Aq%IKeHr1b6oYcY^GVs%lBiS~chT<~P@d%t6)wECm^P82|(W0{WqU0AvG&UtUVe zSXEs`M*f8~00IF33WK7Q(n}Zs0PO5toz-O}DRp%9C=nL{H~4yFzQ-Lo=<1t0S}wqJft2UTdgJ zLCgFAfP`l8yR!4|%4X&+rT~Ea99rMU8T#PRa>uW>7Jn&E{npQ4%6q@crk1bFpmlzY z#o}u2`DtXvfe=CHAY2eJ zhyp|lVg_-7_(7r|Y0wLhI!G5}01Y8PS5nMf758Nc&I@}4|BRo1h89Xz*AiO-h4!i}t z2Ye`e5_|!C9sF1LY4~mUD+B}t0t9*lJ_I=gT?A_cAA|^mbc9NT4uo-pO@vECL_}gl zW<(K06~tGFu85(CpAgFs+Yu)aw-IlUP?0E*IFV$K^pNb4f{>DsN|4%+CXjZK?vSyN zX^{nyRgle)y^y1k^N^d6$B?&??@+K&=ut#aG*GNj0#QDqRG@rCSwcBSMMb4T6+l%( zwL%R*{fPP*wI6jI^%e~WjR{Q(%>c~}Eefp=tpjZy?HnB)oeo_bT@T$AJqo=Ty&HW6 z{RRUU;~9oLh8YGJBNd|_V*=v{6B&~hQv%Zn(;G7hvj%ev^8gDOiw;W)>lKziRw~vP ztXZr}Y+P&(Y!z%f>;ddO97G&C9BCXgoVPf+INdm#xUjg?xKg;LxWTx2xL0vQY$6PY@h4_PkRFxll3@+Y!S9G`r8(*5LsoPb=M+?qUryp4RH0+&LR z!ipk+qMhP9B|fDDr7h)0${xy-r({p%p1M8FemeT}j*6a2lPZX+l4_Y6g_@uG4Rr!_ z7xf7Z1&tDoFHI@U0xdEvKdmKgGHoC2H61;jHr+eA2D&|ZB6@jxZ~9XDB?fc`F$O1w z9EK@I1V(;F8^(0TZ%i;uJWQ5MsZ3+cFwDHnR?O+l<1Fwj0xb3{IV`iRXsi;f?yRM( z>(2= z7nrM&>y(?B+nhUtdyWT}M~UYhPZ!SvFCVWnZw2o@9}S-gUmD*mKOVm-e*}NO0K9;t z09c?!;8u`V&{eQn@K}gd$VR9{Xiu14*jzYIcvFN*p0uMBY@sSpGzTPr*;2M-ff&rDBTW#tWtw&M%sjV3ZV; z;+2+^X_f7j8&m)ld6jsTWmS4rC)H*(1T|H)RJ9#-4s~DkzL&T!jbE0$ywZ@=h}Kxt zWYBce?9#&2GSDj4y4HTK9jCpf!=~e>Go(wRYo*($hp4BeSD<&PFRh=bziq%{5Na@E zNN?zAIABC#WNXxFjA{JJxW)wDMBAjqtV76~A zVjgF{`$ptV+?zcM5sP?>eM>RRB+COUDXSE#Q)@ZvOzUeK6`MjEh^@A5r5%!;iQN}_ z9D5u49tUy513AVP;{2 z;q2i_;r9`y5rdH&ksl)=QE#F~qj{qIxRXkyNtUQyLGy! zdenMGzbbz1@0IE8?vv+l1Ie=j8Lr-YNO1p=ssm@fppTxmknRwK=o7y?NXD(*^g1yTyPd_@%IAtmUK? 
zvXz`w#?{I--nG{C=j+28FE^!6%atp;tGny48{(UyTmIX= zpV~in?>z1i?>{~;J~TZlJ}yD*AdtUckl)b2(_i?AQj?RLgHwr8%E8vbnNro+;f{|KP$)9- z_iMUPtX7=?08hIAe(k9i0H8y2WwOf~E@&HQ0--_i6Z|R!a!v;ThzS7j5DkIcWkVnj zIRF5=2ml>!kRt#?!pz~dIRJ%E&=!zIKng%Z`jw#n(XgPaB^o9MIywd(4h9A;E)F3c z4jvu}DIp#nDLFYkDIN_yG|_PK@bGX7{YU!M2?!k(6$2Fm8xs?U2pbz4hme#A508+X zkcg6qkertMw?s?L@GDWXutO6IAGH0i{MYoW6PW+)PC5W=B$x`=Ojv*l0AYh*uwfuQ z00mS~cqs0Ire8w;N+1|mICum^BxDp+XoWf~00smL3j+rW4-W?gf*?QWeE<#{9_J~C z1Ol$wD?}-Z*Umx6CVD!YG`U{Lubp>%>0dorIoddtDC!rr~ga#U~^tC4c;sl9QX4Ur<<7 zTvApvv>lU&$PxnSVnVBrvd$pwP(_$4?t9Q;!b1RM!9#8*zZRGfiGc#^T%Rh`Jx zTqudBfAQPKhC}=J&SFx`y}~j$l9^c|j|o^bAYg;j>lk$stmo1eVm^K6rT@qsX5l1(J92w5qu3H69@syk5*yaWK9D$sTf}wg zfNPFJ^%zJbn1%z)!+852UkxxvrA3?vphx3*=Bk_|Ws>I>^>J4zI@W)${~mGo$*}Y3 z3P*&D&$!=4q?pRd;7y6;jtyT!eIs^GcBWlt1ZAh=7jl!a5rg*L_FH^A@BuUCwQCfm1X1JMm1qFV;d*8_T`+HrBh2{yZN~@ewv%qN-cB3jXngR zqWc5^9E^XeiMY7}Y6gTKM46@6+p!ES30QR94Y3vjs47#^6-(+Si2qmcdQs<^0?k;Mgde>5~y^tM<;sb*uYzi0P z>Z*;u0Rh5D972v4juFxBSz($GrZ;OyDXrpc>ozObaQ!Y$Ns6bP zL=ibrrYP`D=lRiI9BFRVCzvEv|DhmpHyZc;%Sx$lb<2D`-nQNa$@HJl(m2zQshuLn z8wa}4Ab?to1SRoL*+|!N8{^iK?J{j-x5C>;oBpczDzQ{ui*m6a??-wyoDAW;v;)9f z#W&H98{nV1__j8y1h%y!hP8&w%BdP(!q&_4y1jrwlq|W9C~cQ^CV#d5P0F1Bp#|o* zCu3hR509}5-&I@`bKh>A$KbN14AC%+_J1FLuKm{GWd_HR2lSJY)|jqruf8ZZaG-prWRdVuS#zmMF%W?h;pB_1@hNTyCJQo?Q$^0c@c)ZYJIKE7}F6sH)ies zZsQokGfLNFyi#jNM^&g-T`tp#e+lmW+W=1*3u5B$%p;=2oW-1VfqsaqdglA<#UBZ$*pq=A24q%7rSUy&8L zmG*h8vgf$7cONK%QD7PQY=D8J>%FEj>JwBix*lFW9uS~ZwwEueuG~oTbnMY2 z^XdTt)K7u-^^A&NmFq|s@5kL<)f}H~Eu8L}a`Iu6bnmDO;1e`d?u$ODQ(S%nu92Y0 z!S{<(On&+iO{T*8>7fB(=g_=u{`5V)F9CC^Vz47MP;;ctdIU0OdCN=%iheoe+H+Iq z<5qT4?5Zz5+N7Bf$H^Xvu!-HAA#J-AO-mT$UEFaATt?y=Pxwk(UpH|>0IMK@7c6%) z;TSMgM6s$DP7Bo)kJLBMdcWH+m&*=iENF*noITUG=~ejL|28;Ch@|I+5o+4YXCFe| zK!9=H%MnipacF0!^gX+>j9{V>4QL~MXd?vh)>mj7iVE$` z%==EU!#fCIR|Gq4S(b@&ldN=7k@`?rd>cus5R)UBz2#w>hL)&yRxBJlISZUfZMv4@ zDsvNXQ0E8-?Hy=2e$~jVUc411l>)3U*QeK>1-aa<8`8j(4Nja~*0!(EKfYF%Lm* z0dC0zV~A`WCC(sD3Sm$DE8vnc79ec8R;pjF_Ai!)6NhC@TYtkPs)$qxqdk2;kLBDq>mg%^NM2V_VP)# zWZLTQiAY0(4UQEj+wJ(FL;gd%)qMEms~ws{e3yc`1<@$R3+Ba!nbLc;iH#{y%Ka2y zAG<~MhUzf+M37Kkh7Mc=$gqb2HvStZW$uP2?|YN+Ro_*5;@8a7D>k-6Y4OtSH(cy7^O~5Qpz5}MQ6111n0>-!;Ve%0uKYq6I zKpC`7vb;24gJ%`ZWdYwfuo1v$1p%;Hhn+Fq=b(8Ka)_;~+E=zJ8_>S=iJ5uOC=_O)GAWUz}nT<{9aC(`B3OWf+V- zX~BL$XoDmA!j@$oH7%(FjRCgwX7-n5s(c7}1+`3kc%I3Ra#LbgDB}>|f?2QX)I)iS zM+|*YqEKJSgXz1IkgNmg@cQ$YK|Pf8#FQ2*(OCNnZwCbR2P6gvy7tFMyqk}Gf=+BweXNc7vhnZRC8H{nE zl>#yn2u*sxnPRFAo2USiy|y>Y=V1he`;&Q^@eHKBJ z2TjicmaI!4L0#4tQ=#%9z61hv?LJQ3;rGpfcaI0-4wveX~x{ z%S2)#;V@wLk%)!->FM%Lad9VEyqC6M*fY#zvSV&z!5|eC?!!^1&hMa##f_a$HeTz< zl?BGD`xD(X7%)c<2Om(gX>e=b4FFN%7ZtKvlXb_T{w$e2DdyEnR(x#Ce2L`!ILLWg zykoU|aZ>n5%NPh;=d9YNh>@3L?>|egFB2-pu}+lLtYHM}tRY@s}nd}F-MpQCtV^alve45wnr0;iIe3RjEaOhAvclPtAk;aV_* z(oP=5``0cqg>;PO=`X_O-w3=G9-TvD=LoAjWjZ|;r9!nj^q+r{IafY0L$y!a#jZQ7 zYdG>H<#krk#iod^P5#Z;%hUSQjVBh0O)_7m4vuKt8e*xU_;++VOz;j+s=yTdpeUF4 zDPN_NjOw4|l@Y=MgsDz;Tgb12D6h%Ql=YY+8y)yciiv0d!3y^PLL<%@HrJW)BVTyW zD>kgRiyLkcRBH^X^-}gr=#9PX91&_KHuBT1?T68aLd@Y}d$Y@982c(iXGai_xQ3%hp)Y(iYU-W6(C>?K#|?cQ;LTks8&}q^%3|#z0uxKESH(- ztmIzK&}no$#;!}!Fh|hP)JWb!@j!zem!pQdgxu*`lN-wIes1PY@MRESK~%FRR-TNl z?TF8?OoMSSB7$5RiXB5bEBD3rR;a)EHWxf>ppCq`t2qm<*4Z&VXZD@%vvm?DH6psn z&b~=`L>E)7dmL-!tg7MU97A?)#$yRlEsJ=PfZh_tmGsraZR4klHFVzZJ1c>lf`$E-=s-gx0n3S$Q;L_S?@VS3B)*CT*Z8Ia}5aujUIFbix5yN4} z_>chW{wU!7Qa1?jO1J$&sny)q1?msbKeU4n>rD0+qFPTfjhY)8Yi+`r-&sNc&o)Ve zGF=WOvH&>{jJOi$Jj@ff_d0x3ENYP|wWH6Wp z0k*yhQC@8{d#Vc~S5{l$DmsM=v}a4p!}wPjq=;$cb$rq7P#@UkaXs^=F001Lo5W(z z$=+(uAXi^z<-Cx7y41Sbn)r65H#&1I?#?RH~bVn 
zl`N-;M#bsxlmes1cNNpStDuULha1VC2nvkMRC12g!-vhA1zQR&N90p@sMRu~c`P3= zi~iWCuPM9nY7x3(<5FvVug9~*f~vT~E(1eB+h!rDy=`B}ga~+0pti8Fy>rEN^23z^ z7xO1sF5!D_zIbm+?L^$k;t6NpNv4Wu@p~U;`g`3@Cb4U}w#H-M<8XCj%URpvj$Z~N z-F)-GorkSfuX~!sL;#nsS>#&u`x$>`xx}(#gW>-7S-U1dhms!%pJ`r)^24X5txc=I z=^7gEwXvU?pIRjOaXv1|p;^btvp6_8DilGfH|z)u6{D5ZjZCC{qi1!YvoASiue^Nw zw}XdtLnXA7#>g@m*J1=?M3`Fp-ffHvBZJo;7?d2V_EUnyD?iY?e|qEB_}n0FEC9$t zV++bWV_F8!X*oRLIv8j+gov%PpI)^V_>~^`Yp!HF?RfdSyDz<6j68(^V=s0^N?UKF z3#~~kLXS+Q>jVr<8za}93m5T3O3=&`rAy4F!_4>JJIDu{HpXq;)^UaaO78XC_Y}?c7V9kkKUHglR7W?QD4msqBfn;f;;y-%cI{u5>4BZLx3K^3mj<= zpSwn-&KN`dXJbPBb?8A_jf_&)c&@w@8#I{DDTpBAtqs4vi~|9B zDNrpYpZZ;s*q72IVaLT5FT6h_7V;WfprNMm?J*0%SL|8Et^` zjA>5J(59x2nCa_;02tOkS(CDTN;+n8Zr5~PC2*>|7hW6g7iU6_k2rHmgEFArPw>>l z;r~0??50o>0=!L@ghm^oU#VZ=;KS7zpF=X0;3DLy!G?xG@jtu1QC8-28r(%epAjm3*e^{c}d`~b4x9pQkj zD>4rOC{=K5UwMg6-R1r?LwCu5s6SF&p4QaC9+m?RODZ@GL)K_Hqty=rAgKf!ZG06S ze@M1+aImi-Z|Sp-*p}QS4Wu*`;r|ZsSB8r|ZJJ^eTSuRmbqu2R@$LLXbgL%Pmmo2h zuSI)~X^>_L@96f&Rnswp{8ThBZ_j&TrF?s(pdNQ+mZlV-8HkLg7&>;OA}vw`JHwFE0s$WJ`@-7yT&KjC zqaT&BFvW9Pz}?;yVJ#9RKlCi_Y4t+duUO|A&iR()cka*W4*c?YAwcmcr}wM`1VGvR zYJ4GRO_A6K0qDLVW~mv0|8x=RNu*Kjui zooX&u8-Tmai|8I!*?vly!c`jAV=4d_-ppgr1>3;0^zogN)ESgS+*%O(<0)`~@;`Nc z|9;FM0H&}e8mAHjK+eco(c{{DptP{0r#QV8Cx0jHRgu1CjBngE3GOm2lC2vweR$dg zKy5xn@Ju?ryWa4+k#5<{JZ$ z6ZVG;`)=YMv$SvXu9C?5ntce{C$v^h_iYyEEmm3BE6uJBIeyzM3>ATcigH$mEF1kf z(=S0U9Ky_WO>MEdnL2`lK*dE_7+|dvqd<>*qlLMBJzv6?3o1`q+ATsw;siN!k93C6 zI1s60p-cegJvtQiw8lfdZfOhAoM&5cq>b9cvDBUuB7rM3PUDSaE=M+>|FVk%iqY2l z?jwz$v7XORVa8|c_I`Z>7-C!2r-ct8vX4=%mEu`2(C8!*Tl?nV=3}Eg1jtHxCj$Xu zY{gE*9?(l{j&FVefeIY(9$of$`xYtYJtp)Uvj&=_q0ak1%c{oUzQn!eCe0B-}pn=&d@5M3|=zI+IJ=8#e`8`>+jB$)=iX}!6>51}&o z^MNFniGca(Ie|Q>W5-}*dpSAjDP3*THJ!*hT8nX{8Td}26fOvJ^4 zX&yxp@{!}pOf#zv*kpGRY*}J@imB6hgLk24{N9 z&t2&Q1lI@nl~K47^0rItee{z%c$t#U+88~7$GbR zrA+Iao<#sn&6+~z_8r2g@t!R19ejKi`MO|(8l*n z_`Q3?Y0MzSi&bkIH5KZ=J92NX{HQJD5;zN6?k)0e-oMlCisI(-jOtAE`e$pd#ma5l zk>L4DNu^Qiv+DZzoA&jnh?xZHcimIl`G+(+X(#(Wbn)X>`9pYW(yrv}&|n|vA5^+h z>;{S{skI};4w%F)+z!3r0ch?2GDC4vcl0eu3pBvgg{OYd{0lXCc+^$3%-lN+8-W1&lBP8hT4C`!-C>U|4%QS2e3{>Sg_7O?ocB@Fh%DSl{v+&+Bw=SF5uk$+rUP zhWZ`}t@=ynN`5@cianUmvI~}1bQZ>F96M1I@$5*dfKVg|-~-fPsJ!OyQ_1xVS8r|! zR*sj*RB2!uq_x{%n8r#ZVyA@N2ArAma&ov0XtoM2J-k_cD_m!Xi#Hg$5L`|aIXK6LMZsKx4X&GG5vV!6pp0rLprSFD?y zndKu&j?x!>AA{|955B0KnWL;VxUA-TB!0b@ZEvr79KFLIo>{S2@VMm^*00*y6p|a| zw6^I>O3PbxkBjrG`!>s5dRXOJ+?7UED5+6MT3e8yA8%*795Le4qkc1p>PM~c`BOfQ ztnj<1_4OpYMhLvHl(%!yj?Ar-ta;Uwb}ZYJt*x~j0tSgJ?@Q$R5sAqYw_Iph`Pd@GGiWYeb_>se&ak2C!0ovGa8;nT(ED~F9KIgtSxb{dWm}vVzUSOIq z_PwR{mQgL6cxt<)knYfIQJnF3PCj^g-`KQ@TwK&pYHDbAH49N;)eeQUSLkTi4^{P<7XQAxn}t*Sun6I*%IK)e;Y&2 zTwE57w+8_hy}4)Ja|!zYTzzb!wO)Gp$FWAxIdy3p zCNV!_pRn1_X=7B+33SeRS35Vew~Jgd@mNQiGMn4|Esabv;{l!>9lc6Q`kG%(S4wU2 z_p1i*^X$HaJ9m&Fewq>;X5d##JD+6@+fA2K4twhsFVArK<>1wcP6=^B6}|(BYQiql z$Xx}-gZFBKl&pS+e(^o2i;zQ-)zdhc4q^-5h&})i5Ggh7J?+gPoFc|!j4hw<2M8Q^ z@dqyXRYilU@7?S~QwgH|3Z+v0=w6_0kMn-?jalVApkN~&+l*tX47<0=xAqp=uScbc zcKuvagUR}Op-Zw*fHqK^e>0dnO98CBz>=xS7~oi z(260XqDX;deXX=Hw9m4=G(`5|#VdW6X4xjy4W1WRc$@Kk?RM)*80aL-$<^(5Zs}8j z>#*>+uiV5qo0)MtXh7Fs7UFxUHA5%8W9=HxqudA%QUl>K4Vdf=^X13@n45cn-1TA3 zhjz8bnicNuw<_G293MG2%t96uB|QM}d#8!SXF5}n<-Cj6@xH6vTCWaEUoi_XeYBj6 zt&+GNc;@h+Zqk$umdE^iU&;V4U~adk(tocY85k>D4r&Cz^6-x$cWb~3Oqt|%qjJF&8F?!d?)TiRd!X&Cs>Zc0f<=FSQ z+pBzvACW8Dx*;s;1E|pW+HZb0QX(M-u$0F&N@qx*BJ^ynjQYHo{p3H_`nqq1

    zZo5I*!q5A_bKAj_^id|2?al!LTjo zH|5ZmC$}OvYi=K4u(5H|btiD;Y#8H$nd9f=!tVI-%{2=Hh4*IB4{qVD!;>t#IH@-S zZ{j!yU8-Pn6X!6gw9qdFDuUPBBX&>iQ@OQIH||ur7FZ9h`Jg>w|FTJxahT9^ue#YGSBlf&k%%kKk%c=;oR~JJnF<6}s_u{psWMDa9u!J+x}` z?r`-xc-En5b#nV|vF#Xt1p<7PfAkH5;;t&OjXVe-Dot^!4$h&lsTF-sA6Y@jwmIufe}P&R-t|u=_vJ#}wi3-2n5y zN^AxtreeG}y;i?pLBkrCVjhSh0{h|wEh11tKm`IAE<*VMg{cY~<;U4G{R4{c*J;qr zvv+gEcS{#v_5SZZ@P9*S-9LH-&;ZS^7t2(U{!qEqFef&BEC2ztuRn8Lf7uTM_ASd) zew!27-{vC=y@?t3w>g1&S-;H(lrQM{tB8M@kAM6;c=!I#_kn!>|K7_d@z5e5T=ZYG zLmx{{&v3PRwzSE=guxZ4F5v)QrU+kag(ja~)tilF7Uu{6&Mvxt(VoA2@(P33^{s<0 z@nPZd+xpD=*QA`XWL355MXqj4dmWtBu|Y_}Skh89uU?rpA837B^!m8E)f;Qy>y(~l zLCgcy*Wc@guEP}GR(zP9^igH*Evegw_4Zp&=-zGcoycECEVgQ2Fm8Dqg>r+#cNus7 z*PAcm-TkQLn-UAZR5zq(_wLh#@wo&515EO3ugm(#oMft=XJg|rQ3aHgAOL_PfInmn z4M|RX%)i`nYBVV)Vjo~&;X}(oFyUB=(83?iBjf4&OfHGci2Wgg1ziqE&z8zOfAU~<+ycu zUH;<@=YcLF_iDF$Ds?F&4^QUaTlRtln*`9 zavg9mV=-R+uP!d9{-9|yM0$?PE@EP5{7wRoeAsTpOllqzDNq)#BYkz*RHpC6WbaCfoRRfgRFF` zKJn-+Z>eH!mjc2bJFM^AE+U!fO#Nnj4`*+6Pv`g)r{G6oUAcbBC?u)`3u4~*=BORD zV&XwHt`X zdd^T=Zqw6_kC9(8r-YE2*BtKgi8hK(UtJ5AEQlLJ@6EY5eSwS|k!VP%fuWw2c7qnj z?X{|&3T=zNp3)f=7nPkl(RKh4SxSR6;Q8hqG;l(j)XMgO=NR2uZ~2Ztjy_3X{dxSO zx7{6gKn#Gyj~aytOJ2CHf@9rAd8d7q5$=@pkU4nvIKD%{=l`(i(T%x0#}#o|5%d=> z_}}(zDSuBWJt2LR_zay=D4^#puKdp_?jo4Of5isK%j|t zppiXlkv%|s@j7rt;=9Cat{b&NPqrSta5TMCvn7oImyhC<_IG&>loxP}#P>Fc;m#8Y zWSP*P)Bu-OC-)4r!uC|$x%XAjJ(j*rKj~ccqI>4L5+`g2Xk}==Py+K%@bAp=?U?Lp zGegqPL#t6+%K5a9(%f@{j@#TG<+Fi35&vzCVMXf6#3*aEbuya}CToOhG?Q?Yq2iO_ zj*I>Ua6$Cj+B;?`A${@pooZ5npLna@P&*BoQhFRJ&TE&R4kfx9Z$*l>C#;Dr7x`FK zHn2S^XxnT>kt&}ULnS1tgGyK&w+oeU`C4v8Vh>53>t0Tx*CZr~095%PNKs`I?1}3s=EWMt|rT&*a&j4 zG%<+u_|a|~hwQp5WBH9!be~nvB#6yuJ>FWNR|PW##|zSCi z`cWABMdOlHt6M&>u=NvlkF%hRE`dBRTB7)Hb2O#t%4hCY+1`cvY_el3^7L1DBRhS9 z)k4|KXZQ2YoywX4n)H?V&PH70hqZQ!Ls7bPDcku?uOWb&X>5NaAECz^czqY#G4jhu zg(LQz2*XO7A(f8Z(hp7ryF|@Kvq_oUO&@2kYrbpO7#pMw35P7kiH}JW)2v|tV=%3_ z>pKls%RIGTR=MWErAy8usD3MD@(Z480K&MDm7XZY^wv0!4%;j(e*JKGW(wVUFCPOv zSwxx{%0{l{WKrIg&;6JfrtU;Z-b(bwZN`{1o_2|k#9xD1i2uNuL3i-{8S($qU)cQH zg!~`uiXh*8Ff?2g2m$u&Uj7$EyDI#`zp@CwqtJ`Lqe40FzeVo?7(IO+g)<+U8y*6E{|0KZhx8Q^M&AchHVoS)vzJmv5z@ob5&i$Kb${HmH%F^_JL~Yma%jPK?wuSbYJ?fL2qj z9o8;m$c@o95FSsu3p%9)Lw{`6T4HD8BP*9WB_?7Bn6C(l+SSK}(|q!Q$_FMrf3zQNY#-d6?4uRyz2>~aXg}VnZ8=8Z*}p-oz?ggx z@RRM$pqkRz*;}Mr>k_i<0>L`0AbKGd!@nO}HzY_u8D!6{v+0t5`SCCS;Pn(*B8zY% zMHIQ&EuL% zx)rWNGVvT3&X-@3a7m^XEvOimwvSU7(DfpJ@m)DDt&pBkWGU6*_?1ouDFjH4zO#UC zBAi)%?dkAnBzBnjIQAmmK8%@5^OQU?xAdWUTX<^oVDEW+oVIsl)|QzoYheFQd|X(w zyV2`}1kJ4p!}&sK=X1|e)hXAx$`&>=dsL%`BSiggCTFouJ`lvJryM<@Lv#YSS2RDy#^E+%xM!M0`MZB;rk~DHgA$yreIdT&aSzK^=J2Sm*K57tj%1KtX7e6!5iv@!hpM0Mt3QS6 z|HsPx=N9kZwQ^c-ayOJP77-Se}?z3&qig%(OCJMOc5ksomzg^~rRu%0iQi#_FPzZ`19<*9B%SwNtuxd4rVg z&2rl!rm7Q7Cz!+7Iu%kdToG5rwIdcUW=y@f=X*BjP&Uyb@**Ni;rT_PIA$lJJR64G z{AZ`gNouX`w3V$>@9f!>$gytb1-w z?8_=9>s<1!R3cw1t+=5&&A{P01W)b7@Nb+lGzj&(-5AX3)qldK(C|v=&e_Bu2kX}9 zVzyoOwp*kkmNI7#+8W7r9mNMh&1SsGb*Sq`A@Iv}8=#CCi}CDKyS)4`9U}H2|2Cc9 zmVlbzP$sQ#xB6~5c6M6-P(@{u`kd{Ex&dA{L9T-fj@YhrI$8vvYw1%v9eZB~&p=P` zUIV7Sqt_YZaPfl$VpTUNy0=)@Qtzs^cVl4z>aO~Rdi-)%InHUApzf;InBHy@=g*hn z@sCZ^bNSs-DZ5{1Lx`@ROqG3N+`aOm+D&}EewC6aF`L}0^US5^8WC~GXt#(F_!P!Q z`u}9i`r6}AbprHKBLJ4H&lZ{1sdHt`WVdL}^LXrua^iX|^3EXj zj(SUY>R$VHPjRZ7 z4GeHeJ}2xB53MyGDFlH9_o42vS_pJZ9EG$#TL4r89dpaHXing`Ph=WDk3C_tb?big zg)G)4vz1Cxgpai0;`HMO;suX)wT6trbMt+qp)A!{W(fqO%1?&ai4HKBWx|-<@bS&! 
zjAY~0%f1h#p?68CH1*>Ewhq+>d z`_(ZkvrA<+TAfi}=E-pnd6hRTv4qM!!!haRh~GX2z{sjx!5K)S_O)(m*D3qB^hP8_ zcW2RZ-(3kCuJmjXc7B=#Jz-JEXWz~%bNz+%@xh}|XrOM^4JXtk#vl8Jso5YGqs zh?oF|v0<(b@&x}gB@xP|^3}puY<7Hvc?8MExT`h?=1-XWy#E(>Zvhp@*6sT?fdC-{ z0s(@%ySoGk9-PJ{xHj${5=bCuW5I*Fy9al-(6|TJAVKnK@9&hKCn~;)10fl>R4U4R`PO|ApaJR}67^q9He0Qd%WvS7RWt;lX{U8*R5hPH|Hn zYaD3gE>sBU_D?)o$h{S6gSXN;cCnNx1ffE)Cm98ym6)a=5TXp%_>IVMcEAgc?3{XI$R%>lz!3YG{Cbwm0@JC^DsNkI!-qeBXX<= z&HG}e0q59h96h9Qsc8MMHGpw5(6xE8mgAM9!WL!|>4}1{bd1CHu4d{pAo_nBEPW%1~pL~BmHN~=T zz4QYKSj9#79XEt;sWw6ZRZ90u^{~nE1%G7Hv^v@ciofar)5c3nn2KS%#riJnIP>*i zc$LGko9VZHxjX-bzy1HYJO0a7@0Zc4`nS4_7XEWa#ZORmsoysemOCKS(EaUhx)~60 zdAD9(`~(pIey#k%#h)OvZ8wcCpDjqECI)%MmWe_yMon{dqPbSeG1HAp^1yb-E|h-F8PvvkMpr^2{d?b;gyf35}VIk5nzF5 z(Y-H7U%hdSNO@aPh+}Ts*kB%>+i=wAZsmT!d?Oe@Awbp;v868A!JaQkNlWm7z*qmX zbBXa6J3FG-Sse^F!EmF5ma7vj4UAaWR~2#dnO0dvba|q*P%fN_{e{O;+gJZjlpnysh zmQi%$rHbB&b_vZYgAUbV$i)YfTCH3T@}nVKi^q8z9uz`d5Wn_Utyb6Qk-}3yL4txK zOU_FduC~`rXWKGPF4#8#3`1nL`q=ZBV9W{zC`ZtT$VcMed#YW)5mj$PtX(bzQV|hr zC(EA}^Y&%q2`~|5KgXK5Ixw_Qxe;!fB-4@8Vq%6`z?SyWX`he^#3RU)Z_)GGm_#C4 zHV!o?R)Wyx2yc2j(i_I;yP0tj5*H`inL zO|?#{a=wJo{AF-aCg!4ck~344VChD_(W7oH&@fz)by~112C5FMy5lv*`;&=M(zsV= z=Xoj8;;T?8Ns@;jx&)N7>8`SKhng{FCwX|{;*^8!Xu=mBL(mzDtTY20;z-RwGYLBe zPq}vY`)vD6F-{T5@FENC;avNAiDv9dbJz{broZ>$Z_?Vb=p|VXi}&)-hvN-Pimm0J zm3SCGbA6|qusgQW*DZKiFq(*R2a_Y?0UiG-w=BI@ALT`E9EO%oP`z7mZQ}uU3kyfRNM)LEh)o zZ@rBzV{~M?WRLDD#NaVPjrrDK4SE7AF@bNbX8b`tY;zA}bnV^haQSw)i3Lu3!1wRO zFJ?%WHB!6(#Mg`PYdCqAnr>B$g_LQ3$v*5*v7Ri^F3_2X#aahO@q1Hm&wgv_EQ-@e zKhjCD=fE$PDf3$U;_l4}@JL*`v2CFEUn^bpYT1*`d_D!e^*O)b{C_w;UJXwEGtjUY%us8W75=_cI%g6mvn``X8k^f)-U_k1RBTs5t(zME)+>I4dzdiAQeH6I%@X& z>k#}shW!Ygv}@iMTWg0_>dUyC<0SH7&TK(Gqq4#qWu&GnjC*X+@XeOhhO`e)4_d0| zKBji9g|57WW(p)cLh8fiJA?n6znRaFO$;7u_B^LaESiT#kZ)5T&QzR`PY8+@T_!GW zGZv@GnZ3~;-k%(Sb9SUu*?Z%$48mI56kJB>s-37BAhJa~maIG3;f9L6)Jzz|WgmFT zA)RH@)`yEbqR&wF%MB>yIW~Z?G|9QQ;RGA+>41Z`Q_~wRQ-dK;{AvCR(+1lE< zaW9mOZu}h;%c6`7lWn|4g;#1eB**gHNyY(UN9ZIw&KdoX*cy*u(57~gw3c^Rgg)PL zH4O2`rDaXLk#&$c7_5Z?^{+Inv7tcm7)H0+@>^j^y2C&h8L$Hc+9WW+wbtRejl+dn zf#cIPzKcWMnLyz&<5eAQ*2-Rt1!EHTqDT(9Ws;TGR@nw|YHJ9^0tRETf{Kj!l8>~# zYbQ);atFWn{~^4saIwSRiL!c}WvT7%HLl^952lIU}6E4;e zeD_VugPT&ONU~6m5+hyc@^`{z{29PP`erlg)09n`)>^=(89W4YsN|40qkhEV0(gT= zn$R2798Mnm>9{DE>{luPeitbXstSj9Ya?7f+ac3Xj+w0a_^n3Y%o;P3KPqJ(&o(aD zBSoRUMB4%<*ux#lFW62>jxtnN$n9&U97=bnwDVhQoRc7C?wc0OKx>r9B0cgUVWJ*S$(YH))t>mb>4}5v zrH9dr<#mlO8{{8%z&w-0%%J_0wal85T@<2CH<_d+gz&?D_(6)e?yZVyjZ@BnIG+Y$ zWsE3zAIC9~Dki*N)b|889EV>tN4siNU&kG@hQ*Y?WE!c*TfKjowI$6s@)Xq?tQl2ZUZ>%hTnAZPH$>>gkA{BeaIHVLM9tp)VCij~t#H0ikUPuW=~Q1X5ET(atVqP5qkdeHiG5;;FaJKlThmTPyai6LO)P?O z3(0}+3rRNpw9_VxT}H>vF^-HzHBAC9tEk#d0Hu^W-O!F-d&CJlUFwukeTZ~m z46ofH`QSd+xDeODjDq}UF~4BJm+X`NT{)U&Jw#k=Y)Q4`cC3OY{r!V%;}2Q z_a}&DJ?ga5_OPxvqKSX%$7$))+F-6Qa*b){jW>JiG8s_XuQh;^_Yn&<5`9muf={g= z$Ew`iJ~5)Td3_wl`T%`ApGo&4Dji0(00qy)Jg{TZM|F999(A9T{y=m3@F!Zli@zNl zK>ks`7A#!0cEs_pYwIpPOvHih-r{mIF-7$$Xk4wVTECibr&4s z=RG7N41zhGo?U;5M1p)QUs>SZlowYX&kVPYW+-oPwO@}s08^^t_1$5lIxJ?ZB~Qe7 zsCgtRXw14$b6KffJCvV^EhNgmh$*0vJ{{EdyB9)vnyZ*aw1PjV z)V^kVWs0ZFj4COkAkWOWjTdzE=-U%}4TI;l)ilSV_nMleif;6BNp`z-~0oA0tVw@rmODF7dAIg)Mza48P}-0!D{&1RXU^c zx+ghHgDVt4s+$rNY>)yd2+^_lF;x3BJIM-R`+$}1PNaIZ_>7st~W|%m1A#&3=t{SZ_ zuK9fN$d16fZSd4%9AXy}8Teq!_7P1TdE@m0y2RwhvmJh);`xF%IC3A1650c#mGoq~ zWq2RIV`yXXW$t@^m*W-Rq?X8W!0;3IS<*C(N8a|y9L>7lYbx|F`@Yni$o%#FS)?c1(&TLAVG#ZF5YjAv=W z;|J$DA9CZkgHhRfIK9ZJ!ogV|-X=&1L@3101IZc+bpbLqq~R|;46mh<@XMH!I;q@1 zj_iDRMaLMSma=_T6IV5^CR5fLJ) z?6{T|O_E?^4K>)Q3P-RU{Tgok{H;hH>vOJ3~=)q z=YGKd0E^IiG2?dDncR-Ziysy=RU=V>mTdw!A~+N@5&Zd#5CE!; 
zodG=)44K+lr5aLqOgf}qybb+T2=t4tr&u#X6ou=%|x#ROx$RKtf&CeK7hsu zT3;=S!%^cNGH`nny&u8>efK+3w zRe*)UvtY@}6HtmI_&ESdk--c}Kq)c+(J%7&2FLX_V@H^Is+u$PWYiL7BhP)gAS-)i zuuwV@Pl<`Pj797liyhxl%tgh6UpxqVS4`1p&YF2Z(iLP*<3Jzo!vlJ4n5&<=%*Ad_^haRXeRpg-TcHpL;y9; zJxT1uMe436WozLBxWygS{p;?QiAjAgO)YtY^jbLWG;ItydEiSWZ19UOAT-c}6vf$k zo+0XqYX*rs<8zg0?^9cry<+TEj}(6C>{ZVA=hB&&y{-l2>QeNDZvt?Sy2^KT z#(D^qiZ<0y>qFh9qJYTk(ED?M8bRr6vSf9)sS2bUYOJ5lHUjCp)zcM)NWyZ?iFRYS zGdVksI(w5JeLtLz>{*vyN&^BBtR)91JPJRVE)OY5h}<~0K9>kd87JHLle zA%93$#(D@=Hvhe0|MX&&KWdrk^#DX$YBPBQ?yYKceC*Oid0c}w{SU&KAd#^fpF$~- zgH(eW4?qXSz`YFxf8iZgvc=sWk?lH`t(wr9Ye$8aM>1NU@-ZvXf}HwrvFF@Bwl(;+ z(!gqybehv&^};jwOY)Qwe1Hc9pqIROP|-xOVoh6WGRZ*0(6iDEEMu z@_LOFUm;dOOQp7v&LHuY#pm6(sf}1-@Bp1bDsa>o+hj=T;Jzdsr;hxg#^#`tzTji< zBg$0JgPImnp$c~^)8r?IoD6oc^WH;k;b>@RUA8{Fhw5A-$d(PaFnxVFett3X2hw~r)+pamv$NLiwqb3c1W(G+6-0RL z@u?Q1Xy)Y<2p!hMP4>@VZi>8bkX^vkozG<6T=cHd3Hq3v%8mVHkGD{t@i!_QklbF& za!UhkNHOj>$Dewq?YyxZ@Cai~$0s-}q_+T05@gr_r@SMKS2kId1i|;9)?^X{_E(=H zucU$f)jMZkTT6uJ)o1Na*{mdoF5<&vr@*HDT?)M@tI%&H|6Q7?YK~=pyu#-m0tS=X zp+Et*2cja{I|ny>s|44UjE7ufjO-;dk`-?eT<*?Aks_r?Pw0V%Mi6JSoLudAiFiy^ zIA_3XHHGvCrFvP8&!a*~UcCv$zH||gz|QV2=ZtfZCfQqM&*P)2dCNf`U3XgbJ$o-3 z=XQpQvL=m1{!?@4^2PyQK2_Dcr&ex0b+IX8+R=fIfOSlBh)|L(MR(7FbCE-kLmTfd ztUx@&&*nz3`b5=bsuV{37M{_8v2hJo-MHQpZpyzT{thDveGmOoO0chG;Pxgr<*ssg zx6%&q+nQ(Vj63&o`Uo@e&!()t_nnRm*z)2?j>K}YlPxtKVghIG)kC%%?@BhP2B8t< z%GHPFPJ=5`>(gBOh*~FRwblZbdKGRJ3Z2|?Y?Otl(Mw$E*>hG;)*vd2)2G+Wt1`^9 zdaE+p66{59#CoN`S8s^kXFEq-qDy>K(NmyFA!3&3f0;uSs44OZi}oX#=o;R(%0C|3 zm1^`qN@sU)GDboeZ%1AXH!~YJ@)A}q+JH``W=}K%MvCT z2+BMBsHa-x*!(8KljpYHZ~JAe-5vH8EcdiJMSb-GV^1}#0?kp_lGv6{(74&gdJ%iV zTbz=85s$(h93x1DouK(Ph%sCmfzT7k!sAzoF|{~p?Am2E+uzF1${TiwP4A!kc#%7} z#Nt*Q=0Opz38Pcv=Z|%M4M>995Qveb#QJ;2Rx}qwqQ&kUGBYf`3yu>KbYaR(Z}Ac5 zk~z${DVNvyzCF~_RLC->tT!v2$((T9Rn1pni_u_`@L?I-u1EK;%9(JcOa(u%-nU+MnQt1-A z7e~Tr>GM6lX3)(YE_>Wn-ZwTS%b7F+3VcQaKa8`o#n4bRbZgy|wZF1XZph0eIG)Px z&@E8@jZ~?>N_p2I0E7)M@O~`5$+`@gVyyloJb~aC{5#UEw1QUID9llgklr6rhC1IpimRSeF8gTxBTf#6_H3FVLLmeD%}!t3ix<7!iprCY zEhLw@)|8pD$M2qGT3KCdHaSYzxW%vq5LR}}wkRCN!wUr2gSe~FKlq}JADRKX4s5xP zZnN~1PJ`I*!3%`8^+d=cNN>m&UEgB!RwwgUk`U zjkI@s;-UV=FXNJ$VPBwIn8-RsK5ekp`on50o*Fy-TV}ATXWF79fveX(-rgO?SN%5Y zf*o`_s_+y)SE%K=BWH0r>qxU)!GQ{t{5^@6ozO!7CG#i6G_l{&+MQ%6zTa0hzi+#+ zGnBuUYyU2{LHH#@=nqi96xSb`YNc;WCr{qrApzA!szG{xOEZ1{cX{`-r&90J1S}rJ z-v9bu`x^9#cOV1>w4)xA~xKJvd+bJ*4mNbrxA|mT_ z*^&xa>xdKEFJaNA6C6qECsU#|mBJl%s;dF*PJ)LRM2{a%x_w+tY7^u+AHb{8E8S>8 zB=mlR5l_;NOq1H430r{C4alGCxN)4%Sc)-g=NJV(Oe=V0d%@ic{*Bqr(Hy;oUvuN1YyG){fRctM|^lGg>d?Ivj}BOP{Nr9b9nd zbLp(DKSn*UTlE*E&}J`U#9oLkJAMWwNPR2x5Ml{aNXmL?I8i+QP{h3i&`wY|VQQ7v zt0PGjCfj#f5pC+1O=I<3Hk}zSi7733EDdY z5()na1qk^3UstTaNoh|rRuM6;X-qzT>}7&_jd(%zK|+aly%NO!Vhf7Iwb4R$T;)qU z7PqT$6xJEXcF4N(NHDLQBaO16pNac+50x5X z-xjEp9C{IQ9^rL6y&bQUnrLC>jv{#%w>ZTZ^(5MWK{URmLp*38ZniA^rRWio_xgjD zXqVO2N85D8m!_>XP1JWLyLP;DE_S#sm_Z;!b#=5y^4lGJ%86&_sP3(j$2OSq*_>~d z=e9c7v7ka5?IvNpP8-oDy}HR151JxUQSy zj&KEyHBD?SKrA`Z~&hOp=omLCt^a>3^eF?x(V}T;%fg zAt!LyRj)sEV2TBbPTpK1Y&4qHR2jT)h&|~0E_scG^km~`231%!)cI9_lwbg#_NIHn zly_C;sozwDEgti8cx+YHqSoDc+tLlc;2S8rO9a*~|DwtPB$CiCv*Xjs-Lu|4rv9x> z8l)WhyNG2bbCu3o<7;8~v_w;C%^ItAxDlJdUx?FLj+Q4*F+T_`6H>zGJI67 zZ1a+mQc87%sSFCq_Q@lOMfGWRmy{Z-4`qo5;ANBpuH_kiP(0r{M>H5Y6>R0X z@H1bkbsbtXb4rWHy`EQJaMys*oZd}sill9Nq}r}++70htS0-;I&wS=u3c1m+Mo8r9 zAP2-xd{XzA0Ej`qQuM}tDAjYls2EMqt zwPNBm56=W82I|eQ0~8MO*~Qr39*Mf#=EgE-=R(CYQT^C;Z0KC0PORpH|AB^pW*rjV zI-1aBuBN;2FcY^tk9y>enA~&|^P&mUCObkO0~zJ_bwK6{F}F0u3mHbQe@vbyn(aYaB`UF}>UmX!*(>_zmm$35?SUho?La-3E z^Jx6-9iBcRSPt^w4Vh7xqzXdfq>)@uGguN`J4tFSj2>T=>)C?azz3 
z-;V#)1sx)KXcfd-XZ&Hw8($b?-R%)=l)k2T9%>i?rynbc@WJ~?M1gQx@0Z7|fkqaB z2AN!a8ZYI_Bq}&qEx8kbJrCjj7dW<1!u2iUT!1{Fal<0=BfxJRsQTjsUcmGVPGy+d zJE#Uj(B&%Y!nM|<7bgb|1#X&4HqFpTEc<5PB;();NBsu2(#`F>RlR0ut8Jt@$!VH3 ze?HLam+0r2=Hfp{f0OJ*xav>83LH0k+YoB zAs#FsY=g?}(9IF~0+jD^9No@+TE&{ZG0x1!$wuBi3rc=u8hkm-r+lR$wUUNyNz7Pb zJw*2vH_c>J7TY{Dd4E3rlc9evX{4tM0y_Jp&+5pNO9R=-GdqmAKB7tJ@C3sblA)I` zKWMXxaPQCEX~0P;NDo|_FsW-=O zcLVs*dJKMofh4&Wvg|$#5M4(hvRc&!V`L$c+ zi(v$=@>des)?fY&fOk~6o1O66s=ea}{GswB|Hd8tFL|oE%^mqzu`&C+ZjpsRMO|MT zF9{1xctCx##wxhV%6P1Ja2W4>iaoc4za%a#WDt2oCClL~FGW-JGNq{@MN=1%1{W|& z@_hzc5bRS=qL{y(VfiJExF-izU6X*EsvcRJ#H{|5H3iEsDV<1z`1|03fweGcJ>sSe z^9G5F-y1ksRv?HT4N(E9*~a4TX>CH&kk#SdH@53f;8fK}r#Kkc-eI zcGi#MuEE-|*!HC=XZ*uYRqAE=v~2Y)YP#$G ziZnDt^3M1o9bWn6%+)Nj(P{Hy;7t*C$0c@_7w!vD->eHJ(tr{_l0&dES7pWK6;u4$j#QQfzIrh69P)#D zy;2VEQ=Z4kunEqc;5F(|!6TZls57zdja>#uQF2(^y+^ZcabTzSZm-z~;jz~;z8{Y867VaY zhnMGzmRts1?F;v@ai5oGlEGvco6utwQJmsTm(RJzJcQAIg3<&OZx9Xccr@h|F$h>3 zSz7je-!n;1c8QJM&g)$3R8Nea@bBIc?wsgYL9$x338u_M`yMk|>4><4P#&NoKyezD zXxe^)PPZ(7g8Ts$bNBWUPvE|(*8b<(AhqFHidy6~(zgK=2=)$I6Yc%Ull5^CM;zG! zDRkXd)>3o1s%HPFyY$|B{NwI?dOufG(~~>h($Z>dUZy-W+rNXDHM2}O*6$ai3VTLJk8Pc>rKR$i@FVCj? z78Y=I<==laMckWzqYq*KbIx=~`_pPd=+a`BCb-FzoUm{wq~Fs7uN1jd>@Og`x>Eeh z34^1Y!e=_$evq;0G0OThlfD`$4~j=yPsU2gchycx@X8aW-)&QT zlg{-%aiRymf2T{ETN!uo9RqHX8x0ar^JA~N&s%i@SWvdPm9AHf!vuNQZ02RL?V-ZY z^DqRq1mQ+(W+bwf{H!e1wNpnJ_TwUTJv^2wha`OJ+qVeHlTm745Yn_SXo%Mwz4R)9 z>@N;$`BmY~zW#dBzo;s$W9vper9Q$tmmG>F*UkfN%Wz-4=)kC=$3&``BW8#IP*O z;w;Pas4i2QgiSHgSd&v;pN)a%y=$8^;ruASu_Xh4n7`RNI-NIQN}0|O+_r*G z)}I?Lc~%I5VHWx)Qv|p{c*>c&AT-o+*_$=N@|(i!ql+L;yT`EVk|?D3O&bo`^``NP zC+P_htM;s)qXFd){!}y&s!a>@`}QRFCuo@oSoIMUThYP~`x4B~Mqs+P!yy>ax^PKn zA~#~Zto5f4eGSNKu$OLw2JU=-#ed858-}32BIJsm6R=!ee7w6fILh4PHaI07Ddku5DY{lDnqD@>npkd(UknK`r`}bw@&{1@Jch2g&w6 zc;xe=mJUEuE`02_^!N_T=Z{9vd(u8YFPrrnX)?HI2l|)95XhQHb8kHi1Y{_wA)vQ^ zG(+mEP3zVo%k_$1dv1n5>GhNNo9iwBfWdO9{719%zkNm){=af5>?a#388C$b1jBdi zPsM0S19^q$_wQ%pj3T-QLhQIQtI8=usVNxK!?{ZdL&olX4e+l>@c-f+lR4=E;gR_u zpEpm5;`_}e-sUOV6sXeJei#Y6W0wSzIcHFt(ZEbW#E(_u)9pn2O>Z;uma9~H(T;i` zOYrWjt?Tm1paS3B4)LMZfV9u8uZDoYwlfnzJGhzC2`K8PM8*q1t(ofnopum5-Q;3= zI`>Uo+0CF@9v>*R$Q~S#{;(y``crlJw6~~p_X2fu^4ef*u#LUzp&KtDHKL*o%D%H- z(0SRao|QpWt4Q6u3NhiMjTQ~t&&Rb$lNvnL#Rjt0aUw%MK?vs}l_&EB<6V>IQ!1^M zfa0Lz&b9{|yQaKUx3UAP78F^(H8U>qh!+DEwz0bRblkQ*a5I{+_4w9$XUrCb{T}|H+?kJ`QOy1PqhW)Fs0j#*!ywUtd4t& zL+I2?e|rvnUOt(0gx`E%EuA z7vrJsY0}|*vB7g&bMq{Nhy)lo^xM5i@S*E#81%uy;G+GkS6g6-lb*$tlFVBh;R}uG zDd?%fQO3(3VI@84tnFj{Z>CI6W_uNQP~!*zY`jXsmeT(%WA^`}iN=olEQ-45mxA9{JJ9|8N$A0dvd+}KXbZ^d@7^9Ig#XQV=jnfm zOChCsbAt`A+=tXanQBU)We|1qmr~Kc1GLB9N@c@GU3%(lEwIlF(LUK+L_Wvk)|Pml zx<}1cWHXN45sxYcA$;3M{9fPkm|DnO>{vK{t&p~9R}MF`HqDYCek*YM?6NVlkN>Qtc1aqSCfyrLo&>SSXRCl6Lh54?e*XpC;ZcRxYe=00VLO z>slPZK$Q7{Dzj`>mxi<0lYg1csVBKUyIm7Ptd8(hr=R0FJc)Y%h7_+55-*Q)7|oa3 znYiLCJ>G^&DBpuaXO9Hd<=9wZyYD6`{>CD*9dms0-GU7QR(o^1CZ4Uck=gIVw1N(`(pUtWhQ}+po|sF?d3_@OF>9*1A&!^Gy5dioPf(eB9MW+aRxm$nd8DZm zjoV_0P?C$#gR5k`oy_KtjPa$1bXs;h4SjlA2fa3-^o6%B^tj{XiCu zHF(;}7GtBK`YNT`=c!>u?+7}ogt=;5W;}hSVoF}uY`Xsj*Jr3DRjss)-V1)4#G056h$Lmt&uszD zFsb)IB#9;}@_Eq6prq8R8T0~gv1z;5l}fep;LCo~tv0c%%=eb!klQFYQmd%^(8)iD0 zWG}q&wQobJ!bJ(WOtVh~bB+a}U1Jt>VbZ8tNCLgGv zjoN$8K(cRy#)`D4o6K8c`8rledm;UQ}j;Q z`D{1Azy@UUCKq?-k~G|%^rU*~ZxBDRcBWHto=)qZp3WWcmM5IkJgukmT}|y1U#-1t z?eDbLX|u*`gW!u}n%g+!y~G13%OJw!$rxolq{ul)&B5aSZ#pD^mi{L%?k@^sf3Cm8 zY5$I-*#9fE9&nAj=R5O}msR;MEX|h~ecXh>V}4M8W0VVhNSNwDXcqsW)LTB^O2yqv zJa9jbgu$I7`!c#CFMTvHwlO(J|RA&xX5X*v7ss~GyHh%!RvSRY6$VB za0V8jv!di|?zJaO=ZU*h6i9q(j_+6qEHJ zRM}_ihILTT8t#ZXQbsuMYM4>57&)z)1RN0E1p(HdZW7#$LSQ%5yp*3smj@}_Sv;Hq 
zdP7a~KLTJHOocltdb)zj_1FAFpXsi2a0du6(J4Z9tb$+x4in)&bqt9m3wdYkhPNPNAg6tC^<0@*r8ursDBz0r z!#4vjbAfu$Q#5;C;@Am~V-FI`h3it5!G*UM4voCso*EPoTiX)vH_$;SHat9I-e6`b ze@Ojpo=AC1PxO}U$&VWErh{RJP~#2YZ+!&Yovl`Vib@~Sr;a3DeZIp!v3QR#3EA*V zaO}{jZ)%ZqeW^L$D&OUgGRQY9;Hf_4r0r8*J-y<;&Yq)5NXQ{k|DMo=vvyR*rBtZN zZXq1EklNFgPg6LR_aa=Gqo3?OV+4bGzgvx{r)sn*rWNS&>wt@?yz1hBD7^obZi6$3B zLs_=hF&G>`DC2nFzT{b?iIccXWYaH3Jqi?gzr> zds~!0zz`x>_c?^#IJw+nBp+U=a?0LA&?w{R>?5j1G`DQ`hjp|%Wirl{&=lEEIxPlk zl0|;a3j9l_&%$C_)UQ$=Vh`y^p%qfbd!~%{3{Sr34`mx6s0`dXzH|J#$_ zl_^EfbH>73s(tIdn7ALO$a?XZKYE@}l~2&3fx^*7gr4oJUNZ)JQF9Cha|C>)f$EeI zS9`dUH@o*ug;d1Eb4?wjb(q92(q+|915&G|$on;|c5Ux_IRS+AKLZc~p92)VzfLRr zn{~$?)Q>#B+>$8-@<$`;6CuaJ9e%XyZ|^WRlj8na6sao>ZNUly z;OX~Slyx2iu5ldHW5At6)yXiL-zEn*O#Aco`chxoE~=%XBWZRqDwGm_7-cA=>n|f8 zd@72Eppos)(>Z=wDa~IMC;z(D!aY;ci8e}!*+iYKC#o)X*##+a#TotSw^aON^N&T| ziVcfQlIN1jj4X@_sU}<2v>HZ4Gzi{x6s@nmc@w)p4W7yv5{_=OcoAs!P_KS?^eJ{D z*$(5%@QNl!5h$b@@~2Mv*E{5YZy`3M1epvS-5b$(xkh734&6S1{o7fxH69)Q=+Km` zaEA$Ow;q^qQZ`dbZVp04z2)&n$EN{^lDY((zh0oEYj|<=h~CJvOBt8oXUY@0+E&W! z7|dGO?NI?;qGa(T3~;H7pVCPJzWb3x5YYh1BF6*V_oSbV)**P?X)&(F#a^P7 z#}5-~>Io=ceWHP9At1i3K;t92tD}fbY8PgJw%OQy=e=u#JX__3NeQUgfl0HxzR&|< zGUbW!5@%S7RNu&O)rqegTl<=^D*C&Rows@`X7dxR%>)aMCyVFWS{K*n-k`xd!Vl$) z-rJIjf)79>{@CLhEQaY4J}Z`Iye6y^gP*0P5+$d@Rix+Z`-n4(0e1a6uf0^9yUuI3 zsCD1d{YF=tSss|%+8}N>|G`?ZHL<&dRNPIO_ENz%;DRbH9eR?RtKD0QF>kS> zB@Ne55xON!Y3lmd-PW&-DZlQVb~D-DsG2}#Kgr!Cjzn- zN#C2a9GKtmY%yynW^|s^5GlM|5}Tm7ooxSMR4={%mH&i*AldXq|MGVVzAj)L7y3QL z+*{v#7NSGZ`!2RWr2}orqeZ=qknXIQ-pF!dckr6x<3p&?ah$DiKgkf6UA^t_wZ?H= zU}oR?0A1ZD?wOGJM|iB7M4c6Rq$SA1K83;4f?h>CS|OuA=;e#UNqgs@@Al>^_(?xx zeKa&HB8oQrgIt+&UM1xU$}80 zd=l4%r_7=o?~9E+FCz}5fP9|-Pi8&^)xw`3tC9vw%CtLZD~>(C)LWMYBN2Yuz$Pj; z%36f((#R|boYn`j3N*Guy&6ElMIis1f=kQ+P;foK_)WnT{{Hf#BRiWO7k0HE6TjzN z!iC?u$K~L~^N&qx%`qgiLE(#zOD8C_8MT6+@9UcXL~;Hlf9y9dbcyrjoP4}i%{b?o z*Al-tVYGNkp5qHrjMF!vmGJ&e0RYRlea5x1g4%`Rv6Fz_tY`dpKa6dB0x9Bj?mntE z%_*%V%h%bxpk>tP5nV!)W8K?}pc~Oud84&!BPYuoah@N7$`y4{*q`W72m_7YL5F&2 zaC;sMM=e_vADT?M^Z^M`<~9Y7-Z+2hFXTRl^t{GJ3cUEjHKpziExeI1^&wB&H14C{ zy(o<18K?UR+J5PJBw>xSL8Q)on+8Sr}o z!f!ieEv2^6_N7@-iROhlKd(AIK3m5Io|{0!zYunxXd8Tm`dRr@x}SJ2@!$!Y`JM|O zur)4a%lx8VBy34*-X{?{#w=*iYDuvbcAKaUWw}Ze8M!&myfcdcf+B*}t|4iF={o!g z8Z@Z6^g97Os3c$bZd!&yS)KufA!~1|fx?h<4L}(ao6~^lYe_ZYI-5Fp3M2&eTduWm zFdg}O)hSIxq*ig+7K8m154zcDXP5$y%!H1R%PT9_nk0i`z9H>3#Z;~c#b93*Vi{%b z*FN(;qn>5A3}SNE)&h@t4>H($xEhWg)Y3lreo^#=$J41alfzesrL0e*L)-Ia5SUsw&BgYb2 zE{8E>YVNKhx$zw5(1){D)aknBM!`-ovA?%DC*Kw!K+LV-5!;vLBL&_yyj=~5-Px>N zX29^QIOf_AEI*Wv4;OD8-D)BZnWiTNU#6CKF}*ysu2(pwyaS7&21|1?`8UL0DK?fp z*rKjWfp{2Go=4uPDrIkRiLIVyY-vV3Eqt4E$Y@6{)(E#3^y=AZnTwE=)k1ZB_`$=v z(5I!zh+hOTn}g5t^NNq^v{sVit2T4y6C;=s9o;TVOXuliZ@GYZ+z|Y))!YN!pY8YF z!~ePRqW+(AD}GnzGK3uC)K{(|eq6c!=p*zwjW9-Wfa{Y@Gv63rnxJf~zkQ9UVByErZ$LR9oP0?PHc&i}a+anV=h zILhexnM5F23I15|E=c4i6^O{Kfnk>n(lcjz^g0F@i$6gYdO~x((5p&aBlf5~`SF6X ztJrXUrx!#IvD9h8xDw9LAMfB7=M5fp7GkVzc<32wY-kc?U%U=?2mv*El~^>`WIT~l zs;-KSGK08z;RPoM+TwA3OD4eWaHt<0yIo(NMv$1@{?t zdwfO%hReTE{^`MevG)BMzJE`82dLsC{vqHTr1nc5kK+AOznpM#_p15k10ugGI4gti zD^9@D9hUJ&sa`s7c|r9%l1Y<)8Rlcx2bL`%=r`$p9 zPeGvDz0)?rE&9|8O7Mmq^gDZsfftniq~`Xyb@X=PdG>0vXnN3q**oLZ#qH#<{bn7@ zUV70lpn@%ea78ihDg(4_PgEVo%)$4ryTRO5d$@0_%9MNH-@NJeJk?mP4Vb11fN=&< z0$y3A59~6$Kv6!veA++(YnmbEpP==p+17k5fG3$l#~|akZ(jAA75lqoBL7rvqLMis zpZn{KFMsl~FOd`Y*9cDhD|mK7NAA!&zm*q{Z-GetD?opFc-I}VFY?3uH!kBjmuCnl z^S}Kkm1p_BU7N?Pz9Z)jVM_+ui+K>BR@l*96`aAf;$6aTG_l(-5}4l^pc4Xoj(b+$ zvEZcsTYjRycuYej{dFP8De4C}BxmQjikL{SqaQWNc*(z!<^$3|UsHS^1eZW1cyM+57hCufIEbbpK$i5Ngz_TCdM%icS}PK=_e8 
z{KjcXqN(d=^QkPzT8yStT?ggRAdfksi{pU+q1y_yfof7)QDHwuh|yIy%jcecB!opT zS$V9D(dp8*KxH}{`SENNqOF{q5gyc{a9pR2P}>O3B_toN5L@Z`$tRXYfzghvx%mdY z7eZG@WUD?_uNz`GyEzqZr%O9h5lplRs+t<6voBbQr?4<~??=mm#2q!dJF`sy# zFYd9-UXm=Qgh+g>y|oe?v|^jyFlAy`1oien7oG-q+pQoP*RY8^t-SsT!huMp>qWwD zsXmksrwi2$OAO06wSou@+bt1{xVnSg^Y*I?9ckk*)t<*ii+5Uc>Jwty=-;ny>go}0 z_jncQj(NN`t!GQ*kJls03dsw3uNz289kAn8>0v4cv7N~g&o{g=Uw6lmZ;Wf5&vJOl zerD#B>-_Q^96u=;m2C|_`6fpn-oDJWj~i?=POQ7~(_ZPN>15{wWC4p)4J=m++3aP~ zs^cI;wSk(RNd-0<(Wj*kk0vD7=PQ7w|6mkY59w}9)8=}Loib(6C}KXk5;eY;JfVhY zkCXwzwcU(CQSM&7H8S=ad+WdRCHJ<06f8<{yE`NGx0d_NI*!dzLy~SPSh=l>_ z#xI$nv-%}WP~RosJ57>NUTI-~B0VorHpWAfMJ*iM!Bw zA`8C}D{5(>-)?yjF#)8-|N5>5z*pooc)ayw1vu=}!=bdh`cao&UAlW!8-rzp9B|1eY5Y=(+bS(*%34_)?nz`<;qxA{zX}bPtyiw7WKn?NR%Jzjm z2)(v<)dFV6cLFZSdi~9QpIpFDs#x^L{8b|<#?D=c$1uI8()N{(NB54O8$R9P(^3J# ztNK)KJ!B~HuyN>Txya&uME+caK|t^N{n2Aj$si^vD)*kAITd{CA1_u|kD4=!dl%C3 zXS&7r?w@Y}WIg=ZZpD(6(Dc8xo!Fo4Rz;bH{IA~9iwOW%|GkG|dH&f= zXygI+{r}{4>4gUg76G8t4{@I#+c_p*LWDBGlq9h-px!>bta%_-L#hxfD zz2G?tXO!8S*=r_F*6Lk(-x|$o%^U74h6$2-SDp8HX&i5G^pVvQ(MXn$6+ybY*hG^c z&W?Ihvu6t&UM4LjprdifAx}|&E8m<|srJR}0)W(w%f41P&LU*obGb)%v_8iK! z2IK>aNF;HT8aMS(BidYw^~y1p?iSr+Cfj#n1C9yr7*TeSj>v>bK70bjJ6;3l&bxnMUN?52J!&OiupAX;nCMBtVr`#D~-&6Lq z6qw?^1QvUaAnDu#LUC@@mcPWKe<-3}Up(q_n!o1dX$ja{d(53yq`9y;-~CZ_a=mNM ze_F;`db8&Rhv7Z}2ug%jF>Qr+Q<8yD-OFy@YC{Iio_bJV*0gKHOg%|o`{#GY?ndV}X1u`nVW1Wvbg zybMy!oB+`D;%TMjYQBkY;;_Xa;E+#e&MoVnKdPqh{FKdX;rwVJL@(q6T-=Rbr^lPq zouE&ro*`i4^;&Bz9L$%I*WyE4;t(@BGH#+4_N7a$ER}TOBBL4n!6y>`)#^-B^aLX| zakg}f2uCb<&*@U(<;IPT@HvmEEHeNv-O{|RUD$DU4(?xi&~UU|NMTNLEN&url6u?l z6QrBh@`C^ow8!fC!pTuEXxjYK-qFiTyj3*ht=CtXXd!H8S z*;b4%cv6=J-k#7~0MW44&5VuqB`-EoAKLhj?tg;J^KN#2f*54)Jpe_S3Fz>UM{kWz zK~al9cJg`GM&sA7fmEW5{+~N;J=Ac81W|nF3r5b++Sn?Zn!5A8Ojt5x&0r*XIXP|E z@w$>&RB@cX4b9!%o)M}ETaoh_U+3l(*m~8g=|xEAx?j}P9D#t zkcPTEMX(Lp2Th3jr6x=JTaOe4jSjZ)H;(=P7@)`Xf-4N>^BPQP{4ZiV@!6^5SPMV$Z|MDKl0}YfV zZ^vu88`f|zHXX|&Re z9{e4v`iH{XTAOU7x^56y=DBNf+q65!3M{OFh%ygLsh$* z@agB`!sWS}#14p|;>1IYDsDHnpADX#9sQTZqRxmO7}pR6I7dyxjuv!Nm#q5X$aMte z{R{CX4Ay>_ce?K*qY)!TT8$83LXwxtG$)%`ZmK*oh8NzMN_@6^mrPrNo=(dj5XL-qwD7XKgUEtV zC011;Z+tZy&R%9sV0NP)#zt%qd`OMkdfsf?y%8042r6R4Zi+T*rl`41DUpqBw6`!W zT~u&PoZ~-Uxi}{qXBjBPFc~lk4Tw*W$e~t~^SG!(2Xctz++7u`=# zuk*0p<$OUhFQxoAmD$RLNAMH8oCtQ^<}ZrNFX8c@KPnjR#RpFmlW$pcQ1dHlYRshV zDdJ($(k&36ZJu>N2`pLUAvmM0YF4Ie52Z5RM3 zOhUQ*GDm5gc*4#BCSu+r{*yY-n;{o1>|{yCLJN!3{5hXN)2)Z6uIwW^*TM((nr-LY z2O4R|C)at;6&v8BlkkIxXC4FRf+)Nxz1V(f>Wu^ znWpyJvm}pqR?dzuPnkcKGmVOVEq@uP_X#&(GX)-8hIvyedBz{$W|6`+dXzOqT0e)~ zRfP1)>gDUeOsm8|d2-}pA84q^)}=kW4{)JQiIO*w8#6w4?j(oP-<{j17aJMGy;qPm zrOCuq{p28Ng!)h^E;>O^FK67O9^)PZO|Mcb+)IZaH|5MPctTTwtm5Q=Y{2#iN*0k_I z5#sQROyIWOeXhGuVao@dfXg&g;WbY``Um=)CFw%?7*}^{3qLOLd_rzNXPU^`xKCD{ zdPJzERsSDO9QhOvexH7VN&r0VbD_Pw>@9rPPmt!w(~$D+f3=b@@}QJ&BDSvVo%tsF z^_O4~?2e4OFixGpZ?~5I(ocqvGB1g4C&CE#XF!r9(GA9f%+*~!G#XvZ8I66f{hq_FI$C-K3gGTof~|eW>sJD}P-D`K%!yN-E%Rkr z8cN?5t8?mIJS%yntnHA;1IxLYguDv+#ZdgqC<3s|j__OjMqmT`5jb&WmYRphPT47Y9MLf>h|>xhCuZJk}y}dB~dO{E)}y zka>Eg39_O!apCS(N&UO{^Y2rc%Ew=G+#haUQ2%ySEVb7#m6&eBmGb0a>N7V@E`AlN z_FV&plw16H?ZKX!^!fmu{QidU7{#22UWA5by>o2QE;~B>Cm_irrU&n~7Pcjz5#wbl z*M34{s@Wt=_!}E->`yi%+aCrWWn^t9Nb&n7ZHyJBbkuZyO}JfCw#M?3l6U=gG?o{X z>bu&cXG(|aEnmUze7Lrof~dQOqM9FLRM`f<_kEHgwCjdHHp(c#nxt>AbhtK9$jV<) zUaYZnXb+++WML|*nyjx?C8nqG5UO77;0+Ktc-X%@npe}PJQ-%dwt@QOf_5=qrz$TJ zTCxsi!4w-c#9I_5=@Q#fuTSnbkylsOV5Ff!y@9GeUNwX{&*a2j`&hv>-l2FEo=}}K z@~(97Ryk-uG+eX@hlpbS=X4pu`i3TTb-KB6W&7An-tJ7^%mSF;!Fd7hW@SX?J|Yz194#$jr#thD zI4eFOnPah`89Yuno)pHQOe__J{TW8}-cNMkhDAbj?{oLNjokBW4H7DMZM*1_{eI@$ 
zTy`cMBqAOr?Qv$)RT<_z!zm&f=ea4@U2k2q;{v%`T!W`uD%Nj|kVVbfJD;Zk+z+A_Cg+Nd z#8jR}k^vs@P%jC0^(9#EI8`#AW*?tttdI7{+Ts}6Vt^1e6Jq7e(Y8Z4!CKpRBoA*} zGbld6Yh5>MSW_%RL!>Y}GM6h^NiFo2Y0LBF^paYP7z&y(u|uwmT$Hqyc}si^OrJHP zEM9-T50~Mtb&8lvVUCICJfGq5Lkqp-nZ@woJ(^@;#^BY3RQ4hv6C>je2T(CZ-te8Y z-Z!B~t?!Jp;cU<79&}kn*_;w4;)-ENIM2UVQl_I1N^-lZ!1{jXXKL(7EC#pr#$rs> z;U{PwzZ0TKdt{23T!5aEnWT$70tfMYl&Ok+qKO+X*#h^yfX~JQV%O-VKD3@YA1JfBQWYPq z)?VPNy?U)}ndnKYhNcjLDQdf?Sx6KVgP^ooo-2QD8k39LKm2m)@cNw1$%Q&2fc17m zziOw2S_&5t+6M22ltgZ1 zOU_8yhx?7Q;^);(-;@u?Ai<3~eqk+p#uxctvn4}{lG!=Ab(T!Vmh3;HXXB11?b9hn z%eQE^c;h^PX@wgdipJ8nuJu4k$ZPO;chg@&f3Tn4MxJ&A2v5o0xZt~Yb-rY9hJ3^T zA!fE@c&nul28o0jtBFe>!ca6X&Gqc4wdat9QODp-D>Y?(-mJrNHZO3ob2ZV-l(aMC z=$JdNox5OnJ+8X%`>Q!eB2XiXO+6)fO!$31FlT;J_iXjNGWiiEbos|FSy?Q|KJ}0n z-?Wk=88D`>{0Y(`BqXWt*aFq?5b8BiN&IqWikA?2E2|GUn`*9jSM{-mQg+yFOBKGs z=skiAhei=ZRU)rq@5|!xnr3AqeT=+1-}8o-V%qD&_Y3j0i90-wK1f=%4M-?TJjv@+prFwIg~)0 zV2Bl{Xyxm*qcR`Tq(xB+yB34!PO1AnoN_VY8oR1!bC znVCx?`E&hRo0%F)3VWOv8D&g->wNE_kAfDa-Z(e1=W7~I9%L;9cn8j7>={HBd+48O9QULPUaCLu7b&p=&cLjvIy3REMW)>XVZz&o&iV!)_#FdH^>>i+B+b1cJ>+@VXpHuvi~8I_jS{9f$JIR^RSGSUz)rpdhY@`g|0pOP%>J_*Eh0 z{ri#KaSY5AjO}?)ec&js=OE)TO(5CuK^g$Ko4>GLHpT**i(9?{ODoUmr*oDllK@fk zf?EzA$D&8LZsT<{q|EBf+&Rvb*&26ias(|O&)VQEpl#G@`0UFWRY*2`V9CZ^Ohtd=h4X` z{io4rVxl-dc;$0&(DDKV6D{nWYj&L%7L8Qti$>-|Lzj#4nk$-lRiX%Qz|{ifUnhSzwxD%_NQ+ZQlZ7GF2f26&@X7?`|L4+u zY(V-R5u$TB2pr9(ED9ukf?naa8~|syxq>Coe|dL`%_=S5A@V&=VRGx7PIqowqH44I zN>Lj_Eqt)mie}`JKLpLzZr;+5rH>V1fecR{b)g?qvj`)cF>XN9M@{^gW2a%}hm;P6 zG4t+BDcDN$)^#4Uko0`X0GGCeEA2?z8323zXVwM|aGv)v*0PB~}+-Y%|JX9t?{x3=cYU7zp+fd;2E{n@7*}bKhF>GYQC#wEBjWsWPxNS!0S$Uz|df zTQ{dBC6O?pksH~hJ&II^BlA8xvsQn7QeL^H_yva?77XRvKQchlP}l$WI6Q0;GG3Y3&Rf`Br1I0sjozxdtl`&hW2cc zrCzQW*!+3_xzsC?`WQ#hEt~^ZXcqEL$kKSFyRhcOKm?5QbM%4`sL@ zuUsO}re2WUe-vn7XslG>afpB8(xTn1Szh}2ao*gG&UN_l9;-4GO?q^d!T8K2#xvd<1!;T3QJ2^G}Hu2u0MP6Qhx8QEs& zEQJRuZ#XMoX!fFp%*7I^&LOEOAElhyZyG#(WE>D?0=D?+s>V3V|zcrpR1yEo~k z<`$>JPa2kQ5#V6ViQ{*+fZEbr5d5o=D15O7yxDZyu=rNus_@KNb**`3O;MuQ&!Elx z?#T@w8ZIZqStmKWY&c~|!gbV^iFtEEAvW!M>i&!bh~J`iY`wKIk|<97!BSfkCUPuk z$U}}d(!ocRPZZssp+yt1<5~; zIRyx0Rldr}_N;HHkm@lSNEjE$FiTGu~2o!JuW-xI$e43o}oCBC`@WcA2@58?vR7fBE^@uo4Omk z#*HWv?Q+(ck{7;`(dx>+mMRABU6)c#Vq|AnZZF&uUUwVkE`WCoh#jzANbkOghH%?!8s2o#t2Gs!a4fapWEyit#^r1hsuGTH+}}4b@zVKrMa`xcBd*+rOL}VEwi*<9L}&2n zT$rG2f3QH}Xfnt}G?G$$k}NDJ4aEHeyO{uqi#nULb~t2lspz(8r^2zSc+*h1^795q zE?$?IMxpWS7TLuyPcp-arpz21g~d_YL!kBKI#wQN2Ree6%Hucx znVU2p!du2H*Rtdw9FDytr~}a#NIGb2JCw4?HJs|2C)G7oai-5*CilB2^TlG+sSjct z$Gbg$Gi_HePCcJ^0_-3w6#0mO>7D`rI*))H;cV#~&jHyRw5K7h9mdr4WDTwHr-8>n z!>bYYp^FxK+6A`5g{R^;yhWEgB@~KIKH7$R!-dKb^4{+$rk;zMZ*HS3dY=qAhh?Fo zw9K&+b17&pDjV#WVyHMTs6*+u_R0X`wRW|QmOE(A&0>H8LRe%;bbx81z8D+SR(yz; z@%1^i zf_R$w%Hs{d+zBkv6iQN|V99mYP7b40&wqY+o6Tp+ZoiP@opGR2k8-kLR*l10X;zNX z+ds>}L3#-~6_b@bVWCIADZf|pnNgVF8!`nvnxr-X-Ua+%3v-=ZEXY=qOkTxi_sA)jdSr%MJM@i==usSNdq~hulM;`RJW|UsM|0*EEBrwv1AMTCQ<$*p*TVi0&cOhF|O|Z-sIf;v5@HHJmOVnRS0mKvz zft)?ka~HL5TrRpDPm65!UG}q{yLIB~*C%WkndJ*@32;(SixKV=pB-25rgn^=?4E|a z$<3Xw9BI3y%c#-)M(xs2sE=`N-OHN0ae}0&dIKkQ`V?%*dZ4!4uBp)jF6rs7W$IVN zL-ddA>$YMJmoyB1Ajvn?h8fMS^cS6E3FSk%^23hE6h@C1!d}!@4z;Qy>PBcygVw%L zSUcixXMdql=_@yJ1g>)(SH10iZA^r0a7CTsOBlwak+hRj!B={!erX{#k}GBTi6lIk zEMY2>1F^PU9H-eDVq-MfOu1h_9mDURJ_df8Z3tQ)UT3)=vFv>*Uj}o*t~Yoqam#=} z*ISIxNRn^Vf=F*?jU0y`v-o{+$P&bkOq5$gfJL@>6vo(qx5EMcy=6RYWQY1~h+!BAMjvV!prJt2*7@%P z0UZ&c5}MC1vA~!5=|U4`9xlJ&UupsQacmDI?|B^u_#ND0G#F#9;@l2%kM8~cb)@0dQzNQqjP2cJY#Y9Acbz?Q zlP%ok(i&M{@ee#c%Ccl(?>);PeIXBdju7v=oD2lR^vH{BtknT2z5y2Og1i%Ny}0 zJXQA$>Fa6yqL(uoVLRsEHJxpe8p;-%R5xe|h-lJoShM!)b*_4@l4H736R+s$k1!-q 
zA6tx;YFtXJYYmC325xrRxTcd!wc66>v^4kPO5y^EvgG>^882Yl$wNpf2iM>@gg7@x zVOPRf50;qMuKj-$vKjg3=Lk?l5*Razz`V-1uj{}Q6({%hjgbDVxwnFV>Q6^krN&=H z9vHOz?L^KX(bOC@0jTc zBOWu+h}LR`ZNRK5C~PPyU&)K)?=inAhW@(z{3GG>IFnx0S|(B3hRxa8R+!6HSVUTv zop!a-OYlWWB~nYd>U(XiKEiVKmoS#|E3}ECqR+fQKzI-PsiA)<4|lec&-RE%MtL7Y z>QJ(WlLZ(fIYS`HjHtQl)14)z(?~1+ypHiu?y`GYqJ~(QT}+yHTAiG~{O}HP+80nt zd&L9FIz6V*G?$^j;c3@(bPcg$+;Bzb*NVa6acs6uP=<`nQawO>x9MBe2`1Y%zM4HC z$c_&bQliJJX#6e^RD;k!dg`0MDYv04OW^iy;(-PRn2oEQgFZh8q51Mocgww6+UmwZ z=CJRE1OM=R>BebYyvwkpDlmhS;NEH=-U*Q$lk$s{LucElB_|Afmg0)9gJ?&jOjE4U zS^wA=p`$ISr^5@b`F6tR-f?Rn=TL#w23-)rxCq@eseQ%Ew=K=Zns7$9WooqI=PVS3 zpBm3W=jBYIepX7P^&ped2hb0}HKtljDq=>(@1#w#*{DyP=2buq&tSrRmbj2z=tCd0 z@|;A>ElDRdK2$_f;JnS=K>u!}Og0V^smOMPOCA+M%-|%8@$hbv$1;0pZ3#^@esB*a zsvM_2n)Wj%Axs#f*J}_QMda*@R=AuS`2*?3)8%Bgh;d%XycqHu-Ir!UJP;BPN^3eC zpSa2BjrlaH z01K;s%-h^6ow8dMOUtd>6bFYa%LZ<8z?%A7H}?lPK~0selNw;>K|nG46Eyi)vU1d7 z`zRx`(ll~+LVtX5^SMEN@a#7RmqXCk#^1U1cC(h2rh4LcPpJOXRoJh6Ak#8Ma6)L$CB97mTdX`g>xvNbIkbdF@ zo1;3$+r70q7V+Ld#NSI(eMZg@Az=qUvyK22B*+y1;#J`g;D#qY$ic}$Vz9_*yYq!8aS8ll~YOda>)eb)FBbWgBB{|ILX z{NpP1!CTuKFr&BZP-Sf^cFJI{4Otd*#a|W&&4Dwd(hgf1c2!@Dy3;h@^R@g0@iTe| z1IVWe#oq#uKm))3S?9r2!f zQP0i05ZdqBpg8jX_Jv1ld}vh3tGyjWkWC3qAy#?M^7_Cja4wMQObPj*pq0W4p2Z#_ zp7zFq0r4Z;tDuF4xwzljIqz6^x~nlqiELcR&_j#QMJB+t<25qmjHW)`o*X(je$UCQ1W-!Gm!##sc9l1nY`L^Y;L?KVdK z>!+aXpQH2JV+jAPJO81%_^pM1A3#cg*2CcOw?UNm`vCewb@5x{{$~$9e^1WD(xSUx zKa8Kj2ltudM;ng~hc3I{6U9D2{TJWszwgrrI?O+2AmGc9UDS3LELacAe6T)Iofx6( zEM^(1TL4hPG>{$04tr(^Az@yrW?LxaMRG1!kM!G~c zk@M|FUq1C%gG<#S&iEhZMK4C7FUc{v?Pj3?F;vit@`=o zVm~V)@-jK#H~jY1vqyF7d;rD!Y|(uLVZfemJ926-IkralN=XWR`y%=v?>8MBRlzp*hTxh%u{`jIHQ|$HS zPC|&EPYH(RGf`aRAEei%1_`(&WKI+!u~c#p@kLlsWn2T&O?1IF*o^Xt%10)0*kp5* ztoK{<+)m?Iv;qK50c!;ssRB92J&^Ka0lRm-pP)WqRefSrZ+XtYsZ{?H^u1{1?#*(2 zuqBr9wf38a(<19ZdfMXYbqTx=0!n`1=dT6~uR99DF4GNijqmhDQMl#_#N!7l9nP{S zRlShVy^vR-QcXZ*|Hi=j=k`691`=o}q%cZ#w z&@_sZ^gPlQxr6({ zti%;6F@-|xt|>S?o}G@&>f%rBXUf{*SLNLg5t<<^C2i2cA4PDVi#V{R;76(C^9PAJ zIDQkg;NanlrkvyECD7s-e*pMkB~cxSzg3d*_kUN;d;MocBqL2(>>^Cf;RP~(Hi&2& zhvM^ZtIuSitDwqy+{CBd5>+e3Wk+lSbFV^WdpQ-RW_SogoBi{h^t#8@jj!gV=4Cyt zqOb%})t-i7k2yaLHhR@U8xe-6P!*am?cc?8XEJoizS3QfLRRBMmLw>_$|KEkr1G6p+)=A;hZLewwGL&_})(dr>dx@ z{tzP?(~)Mf4NIGAtn<&0e#dx+x`srHBqtz0n`!@|>hZ0^}w# z{nZSbq#QK9I7k;0mWacjC2cb<*WP%LxyjT9QuucKgKqLV=7O9CIOM)2^+6THE;}oI z64B7PN9+LX94x3~12#@D)pqOMlZY{z>w}j2mno0YN6MANr<4VQMR9eR7-XltSXfI5 zouWifMN`+25-H6uxeKRjQs&pGHs0y{fMmUj)Wj^K+Pp4GOeJ=ct(sV!G8m&%A6{QM zqT-R0=7O&-N?aMJ_oOv9d$>8-HCgE@jJM=L$zGO)Rv)l-HKvU{Nj_s<7#_mw&+m`& zjo^#lDnJn*9Cx%aXWaK~wCeja#bM^@Anl zEf|LMZw1Zh1w(c?V2j)^{o7cUM2&JU=nY~?DutlG1hHPgaMI!X3_3^wrP$-)dFN;Pq{if^t@=?RDV(K@XW?G~Y=+{- zAjbrga7nD7%hI`<-<|NqR_!~TcyTw%)?sdWr^HT`vK!Bi6pZi5X8yTZvjhY2vN7mE zBC3(^<6kRKcgx1*aJK0R@cNg#DU<+M1Z%pMsfXCS$IzC?j%Q56U?kc7f)4IVO3aq} zh~EQux^bq;X-yEhyZkWOVC0ep7!p;b&k!Ah$0xsIp;@y;csYG0pBwCfr+U-y!T9A} zJ6Qo%ZX9`2r*e6NH}1if1(bEjMgr*Mf$rDMq|((7JjCXjC$^tO16gcNn zVv4oZPEXdhgY%5lRf2L7_AD`17vaa&(Jg9ft$bn0X8BY{hO~2k6Oa5ZB#{&N?;+$o z$5Kgr5aDY1%A=6uggU-uPJ3eBH96$}I;e8cQTL%u8M)vT$(|P;G%xA|{;qnhLoAH(%3%Z?E>nACI<$2|o{>lzvCl5yTm}{2DGR$6x-)N^Y<-;;l0xyF`XAYqVqW2?OQdGr|bxuLyH(DEl|JkTb&kd-4bDl7;KYWaUvtW-3D zVJc_Dr5RT`7;x=y;M10^+XdA|XdTtNYCTLPDM?jR{x*hcI?1?ATfzAC_8>p8Q!F=) z*jUjf?Ead=A~Njxnms4X4%9b{Ff=?KE7PQ3T#@Qhz6IN^h!w1pT2Mh$UN1&><^v% z5%!}6KLev&XSR546+FzPnMZa*p83yeuB;c;HPHBS%kh|>`4bue|&S7 z2{`wlf9O|dOMMfSBbY0}S;s&_8RIRlVolAU_8mlu>VnuHf?X~Fj_o7J%7AFRwyXsx zFna&)`$Cf(w~pqg8@ZNnn+ai~+`YP%-0!zi^o1w&;lvubQC$XVgJ}Uz0(HOw#F|@H z_yG9{YE{#OJgHn0(P8@G=jLP3=I=zrNIsQ(_-{Q)-nFRHM=4(xwZi{W|v$K~M1DWU>tX8_hd 
zMNKXv!!^>d_$u{YqZIf=?_6msjAo2+pdF4{#&e9AmksB^9peLJoT~@*VCLNrY)HXoX)VKglw^ z+kBYqBcieG;?lgD_x$3fY2Lh~XPjdksVyLI-WqOJP$B~`l%=<9RGud(u}w1nfo;>T_6B~QWu=~D1V7cK zJ|+}TN&GHv<^Ove(0@SF{D&JD?Mt=cx6q|k^) z&0Nwi+P8dqLqg;#dlzAGIrORfomYL}kJRo+{GaGxXqBBUe;Z z!*SrX_Fb~(0*qU+u3*_8dG@&;Yv{UUnqR;B{A2Lm-v6VIOOInsNbMSxOX=C)FI z{&@IfW4w4wiB(HxMs&i$UTS*bd66{vd3qudh9fp`(4iawdkehWEv28ZFAefwahR-R zs~P(-u}w=y-91((wig#D7ic{g zsLyDDp|0_`4O+Fra^EJW$lqnMqOo0v3-p+vF?yN0pB5=S=v_|KIuduD^ig7mzmr=p z-h<@Egj3OUW-I`zWi&cMbc?q1m8Lz+M;n*Tjr0=gar^CjVS|Prd~t8nWvSW+NWSVc zCRA};R#w*}KHv=Q@{p{Z91^i#9|SKX@eUKXgf>)@cEdr{bvsi)7&Rv4w5)>s08^9O z2NCCWli^}9XXz!N;(o3{nh``D;`2-DR{Il#byKZa`_w+FsHqi8W3_6j1RWRRx=OT@ z5psQBdPWg+{AJ&W7@yc7?ff}J1aS?1Gi@n`BK%tlmSMBQen+O-tcKRgEk^d^QbW~Z zSv--t==06FMo59?4m454cv*C^=mwlAzl%=B1w-`G=QSDSEsq~9BHJpbDKQ_7k!KpkZG_rH;|%ck7ZD)Ot8jg{iny znl_JmdFSu8l!;i8cUw;i4frd7qiSa`wC~r=G5OJVYw%5vFu6`@9D4eg&O?(+1V)er zhQ~#m>|Kp7Ujx}yL5QU1Zd%1i0M?^ z5}mYrq5;IEGopT%g}Gyr>npivK(-63{B+7%xN(AO!o_lR_a>0Qy?IYwkU=T>i=n<^ zD7^Xbpq~u~5tEFVO3vQZypkWjea4ah{OeGs4>DX8=7>lyAU%Xp2^S6F6`t*DBqnne zL&27bHJ0&LBIjidp$k_>CrNq!yJU+=7V5uW`A{QKxjdd`u;$cm&aWE*E4mxutAPpC2+x&c0gHFrZxX z$+wc=XWZe0ix6?ZPF%OVeR-*K4-0gd-Q1$4#sZeCxXxnzm@0~SPL|{vEmVTFogfw> zIDuFq^&?$~mXP5)Byee^7-&(5CW3i1rIusY^!sV(Wlm;x(@>I+>QG0Cs2~irKDq$u zqlgVZQX_Zf%3h6n_G7u$&{b@;QnIpY3^sYxD^-lzxXqM~5ZiBjgo|&J*WEu4a<%CZ zzUW`;h0jcltOLcOwI?~Zw`&zGxH(SVAOqcRS(9kO|cBDU8ci8Gj0+dY? zbx>}E@3rUg1n-66Y9o$uk%il=dUHR{Be|{;203;h1(KhM<{J>w8Crrdd#5d9of4>G z^n?bXw_u8LIYvB84E=-rvZ6EAqpp+eZ|Oc{OHfvqB%D}|9-X*-Tx(*x(_*{3)UES& z6c2%{qNG?Z&niN4y#QIiZ5LBVcQel9wyCSMAX1Sm6((&2%2#&I!LPpF5a(TlT@MFJTC&>8C zPY`?mQn?O|gTv0?T!n>aumj_;wHh!_(3jdhK3g_L3N+U@7h<7@u9Q*iVWgIfu#Vs; z0{ri8_=Ufp)-_DfLB@XE%K1`$hP;dEGz;k}ceWBV+rY(zO*@+`mp#1@PB1hEM-~w+ z^%;IPRg?H6BLPm4#>$`A&gRg{WlM;uhtVVG>b`2Kdh5n;#ygyk(jA^JbB)9H6V&3b zrj`|%@d|Nol<3M3EoEb2o5q-H5;y=@kH#IQI?biQM z6Ewy1X_H($@c9e4PuOtDa6|GmNeJl)PNQo69-NH6WeUQvQJK{|6x zpP+dRzv7@Y8vNG-<-1!;16!{q99&5(a5_v{aJekrIv-Xsx^V5%#DD75Fsl9_GN*G& zpq={W2uaZ<*@^3vPv-hH-IR}yo1XFmVf_;tp!-u0UD4BjdE|uGh4D471?0kDCe4$A zuz;0n^fFF^EhlO}3S|chFf}@7;wH1Z*6HUfIUUXEw-r%SQX#$b+ETq7wq;3e6uku6 ztV7+Lks_q=;x?MOK^Jk?5Y|8g@WG-2kHl^zZ#<<=aZ#pu!OZYD><6_m0rlr9_a_B< z`?^?V6Bn=D@A4l<0dEl1a9_6gDvq*y0TR3?|DP*4zvb%|jD01wwCe%jdQ?pvx-+gy z8SvIrx0qn@#u?sdzEwfYddK#oTGsgWI-)Jy4v^p22fWk(j;ZpFgz`vBM^baD)HrBv z6aH%`d>gR|UVn|;qhXeJtJgC5sgmC07n*CW4g-ZLZn`hyc5bG`jM$Dxr}ROaTcJI( z?Z(#fqL%(hB)|MpBPSOVS94Dj-0YVOulPNR#+u=eT&N@rJV6AsKZ;R6bVXo-eJ5R# zlgX_L!^l3Nn&`66sH15d>T9o2dR<>1tLvdRlP@^eYc-(B)9E*Q;hHoLQ!6`{W5V#8 zQOfkzww(~XO#l;JZ7J6Ew92TvY1Xyv6r)qs#G9V2#$bwXE>{@OEhxnmV5I~~Lynce zFtbhNKb^j(fa?DN61}sUcLx*@kPUNKW9hBYE4wW>`*ie@plv~yQnN?GpYo+yVyK5` zow7EUo?W=K3rU_DRMa=VNQf7^3ntDGbsaa+vdK2ZX`a2Y1JnQ-y(lBNa%*Z;gy`}7 zeZZ)shO^#6xL5zQFvlbjQT2*<`eqTW9Z*p*lYFX~YnO_#{J64l8fLT95^!&|*6S|G z>)*(nS-h^%_)PGqd>B}Q+DfMQ(FC9kKKo3*F8J{$lAE~8md`1XCiq3Ezq@H}NgDS+ zk!mTbZTi{sN=U>A%ef=Ino>yE%7!Zm=x<`Dzp)~KQrbT|a{QYb8qRjwi^?=CEoXoE z(|EshNS(RGeFDY1m$7g&7!f>I=ryRhW}70zk_u&~vXAfNz>ka2^9+g^;tJ8Cz8ppucdYyX2gs!MgY(>ps2xvlA{TV#du-=58f)7) z{CoNImL!6sd-(Rm9jn6E4$<{6uWhd+yjA?PV#J7QX?P`e8GOQkC!rMZh$D@G+tPh` zQWM7Af+p#bv#)3xH)!hmze5;j{|$xDhqCGD z2VU7YE*F^qLo2R+9zlI&Mv9Oy&=mnsiDvb^C}t7!$mt(|XGEU-8U_#z zNv^wL3NdUSCBco3f^fOcxl&_-l528rXIm<{5#5s9Tnvm-HCFM^W#4^AMx*|Q!Uq2M z^ciSJ&~8166dGAvgv-kO>TWlc26CrdEQJt-N>B}aXMI&N>$(XeF?tVtvMKj7Xgv4 zu<#M1x~BOGN*7%F9O})vGIs}GLKz3{B)_>Y-d$NNqY68H9OeZ2SEIm6uffgqe}H}h znV%?LKtkl$q#6W*LH%6AO6E!fj#8)b)OfA?9Pr}FVH?KvpXKTnYClsUr7|`>j!(60 z14R*v0l+S-#{EyVROjD5S9r(;cyPIoV(|bfp}3w7;i-qDnzNxavtuUJQ!5rxPf^5; 
zWcwTqy*=xBYRAh*Lr4R}JhCE6dV|=ZxbXNI2CP>~UP5z@;F-dAD+@)rd9z<=(HPjy+?-dLm|kchcJw|n)|EK^<*`Erx+t9Yw=hHmv0+;y z03N&OzSh1WyETX{i~7CoJ%$0zKpcZ*laU#=gVUTG#7Z2H{d)_DjL}Y(iby4@e`GlG zEa{hUeyfCt+jhhUq;kfnS|&3>KLz#v;@+ue{mH#U2f>$`xV|GKO~OrsTdrWH{`$m} zr0z4{y`P;YsIN<}pkwq>VS3FC5JHAoeQRuQTF<7_5rrNub7KAN!b$#q(W-Zf!@2$^ zzvSP)u3o4lOai}U#?!H!BBy5Dh-9kY6wLxU1!Tyfbb!iuLR(m?hdmq`wLr}|H!`w<69byQxHJi8% zD>>MLII-C-syVo^yt-AKB_r{0{rAI6IX}xr)kJ*(B2*-W)bglF5?m zWC3v}RgJe;BZB%0pYOPbpXri(qnMlRIf)SBE3EeKwsUq;sGMAN3m(`xQW0ZiAjL#U zpRMNJwCf?>fXO*2tMr#cQK|6k=XLcWWU|##WAsWb9D8 z7<1?ogPl3_>iUu@wrYKge9dLeqLpVNSoIOs#mR@;i5D`zbsDC-?YzllW)%V&jU!b$ zML!yhxC`YukGJzU5o+^ePZMo&GJeaoF6k}o3&GL%C59I`+l92df`iV}Ug(%zU51P} zSa3&J#FU3Dx&8r?vMfH%(4Mat7OoJK1ZvP% z_ta@0t!VG=3mI<6r@=lwfi3q^(8EMY9#GKAaaUV?Pboc zmxBcl-pKd|G3A0XNm`rM&(uD+uB?yMfl-_dx(In%Ao+5+y@41>apr79>-NNq#)WF7 z3i9}Y?VLeXxo*oNKPDw&;-$aj#%XF^SF~xS3=Ie8R4w|h=jda~zWPa!pyqA(M3kJJ zns{U59H`y2ytrVTdB-oNuGwSvT&^DPCH|TJTcd8#jg%nd^)x+^jHFIG2%UstRb2`& zl`mLkUjwNyt>=+rGwG(VPm-Ga>w%XqipgrxdFnO&nlX{f>`7DSXFT z6z63YI3-mp{-wKPE} z!eDw-TRaMnCmcS)$(bw>QIpIOBN2Y0Tj0F8O{t=5YAp!p9=?JO#yKX%`Uzbn{#krp z!`|4}v>nAgS4tJtd0YJy8>7Px?x^1`*aPMnrxFr<)^WVgvErRxY`IDUNwG{p-9S%n9%!M*AFw?+b%WZFtdUTLa6RhAbicR*LIFN0XJbSqO6|5-O~)2VD*eZ1ch1uUx} z@ra%Z7pr}laka4~!e8Zio$@9%69;k|F-V{*mA3p;twRT_Fz!?IR-Gc!NGFP_xU1pg zYJw}9AlCFWW!(s}JC7cgvkSM5%%#KGOZD^f;B6@t(hpAlN)bj(j`<|RwhQ99CUbEq zCz;yKO&pTwnam+^)yEOvUph;sK3xyJ^SsNX5Z%Dk^*X5mRtTJrOD*RauAt6q;Wacd zg)x@O!l-dH_U|vGwrwzFh?#WY+P{TyfRPYDC~wd+23EDcZ3G&L7FKxfH$Ig*Od(2L zjd|gQ2>^2bDr+gV>N5MZ1{Pd9b+JpEQol_4Hfq2jHFlulT#jElSm4Lmo+@pQDT2!@ ze9y`}bJO%1C}4UREjuGfR-Z1VtUBk^LuBW9Qze-Rk~xa|+9O|Rf7x(QTFN@vs>2I- zWECrF!xF<87^adWZ=N~W2$b#tMI?|L696{KU2fJJCQZBtZy@&j$(o1o4Y-&ORBxy3 z(fFsda>CV`*yN2&1`2NCc4Ina;5u{jgbILk$vwC6Dj1<7XNGG`jHN#QNeN2D5ya(# zr^*WhWC7LfPAI^y!vpwrN&vsEDDQgu4-jQ>>BFz?RinJvaA|8$T)k_zR>CK7$wN7QhV2r>=n9BFl%0bwFiD=S%GMi900WOtGr&FBiMO^%`G0GxZL7!uoc?aA(|o6$0!L}%eC+_1BU0M8ZpWCJIGJePMrK>Mn$kVv2fMco0g0Pab!+O4n3pr>O>cymOO2BQGL7lCy$AKSJ|sEP&&TiIQ`^+j&aa;O z1?QNNF(rO3wD?SQgAreS(2%3%_plLS7d>6Vt^*A_i0){9Q*)|YX!MYsxnzzd(Ho9$ z^Q?wFqAT_rLm-~D_BqKj5CQ*IbdK}8S9L4lm`u?Uqgg*_x+i_Wu!@MCjvlq>4F;^vNMQ&2)L?i{I$I*Gh*}`|W zzNlTV!G;@Ezh@q`pJ83Ej9QbN;JPUatj7csy7-b90nfxZwNQ1t`RT;|YhkF3K1=bc zY?bNIhoRg{hlD}DTer|<@Z-9xnd63Wm;7o-0ko3t2ADC zDr%tHkssH&{&Q^*|$vNUQpifGAk+^?UV5LYqcXmj|@$9Q_JmP zhmai2rn)+|a1M>&B0Ed+%8$Mnu$+}=1hhFnk@_5LPLbA_<8r^Zb`MBG^mw-37vQfX zY%NJ}WcN|v<#)@!p5NCsTwZ)Yb2W7YK+wE#}Ty8tCC(bVSu)?#eNSSil6;nn-;wOH2g?)>eI{9 zU?!zuE%7YwB+_R@^PC?L4*bKr_9vDRiT?LI%=$gSNsGuSo?p>L(tL35cHLP`5i3f! 
z&?z*t!7h{n`wfo`o)|s=cNtZ|L|0kLBg-fuTMBivWI1rIqA^qD2!Gf z6|g~m5do1}=H&d`&VWoAh>5}tw{9SC5?LPo`t$L0u#GJ%E4U%b`-|4S9mD$sq_y!u zn|&E&r~p&XB&^u@E^(QR`sLE*UDV;w(m>bXy?XUd1JN6ETZcG<0OQ;p9Rt9>s-MYA zln=-|-E-di&L1pPX!6fHWavP}9+<$Y?j`lj@4srcT+{w^v;yc7%LTV#P{Rwn6hKNp zH?6)pe1+$daq|E;&vp_MkRhJPoe+;~V){qm? zfj&~+Dh9!7exk?90qx=r+d=72men*G4iyBa@DbjZF0B`;o6F|HL7tL8Bb@sOute)@ zZMxuFwJP13xJHio%3dl-{q;`9)Sf(-%<&bU!y(RP z-cgp0PvFy5F&Xg&I@1}pIOfF3T_sVOmCIy}gOotM;1T3_Z;P71uIctEjpB{3HhA+Kv~o5wou3MO!KrZb z)1^QH?s(64Vi3MESyMH=uX8=DEZZw7Q$Uhwq5WK%N}cj($muIWl%r5p7t=CHM&+zK zF{3k0A~`ygSi@eabX3HL#RdqbcCmLpJV-bmV zUqO>zELf7`G+aBgKBc)OLBw+IMnQd$hbqeAU1IzfnGdVio1ztKqm^UL8s*#*HYHZb zl`>`8Hx$rJqL9ZL_tt_wR9aqZWTRebTt1$whqxuADNh7M7(e3mIKLIwEu94I6?LaRgifQxodyDvv!vP{Kl*I4Cxi z++N;+0;?ME?QbUDBWCwwIdpmGoj){*BTZIPVJm33vG~>%$syHAfBKz}uxQY^>F8$D zjiV%UB-+)pB_n}(Rk$Tl0kdA=lM#&tkjGT`a&b8_jICp77N?tKax=pzgDPnSyVH(! zxQW!J!8tNLhwu_8*Zdl9{9L()?|;D$Tnw0jVzH(+V7jAzbRP-6T7DW^)L+pPk>uFSW0DLPW(F z0H+&a`Yunz7RhNwGZ$!zqPT1r>RL|&akuM~<7vFa)SV}1ZE0-`Kl|-DSvB^SiBfha@p=N_!-)=&2YG0Ag=;a} zcnh0aHS6Jz>bK*V)J{a`e2M@l5M+a%3qWT{wlzPz?PfAvU18tO@{;eksUOtB9O{vR zK5#qYsbJo22|eQ9qV5+mYKE)3VJQg6`~vyDq144;f7}C}&lawVV~AnbZ_q@1Jh4W; z=VEE?gP^n6PBwsJ?5+BwpX60Akp?2^BMi;Tk#l`cRgtuU+1VF8%DK>B0EVHAr@%b=_vLA4G=HWY%eC#JNyNa=H41wJv{G^U|YY-1l|C75yG{!eVDy=#{P& al1hQUL&Rpi&OhZ<{Ugf!-(LUm?SB9_ybnD9 literal 0 HcmV?d00001 diff --git a/docs/img/cream_flops600.jpg b/docs/img/cream_flops600.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9f7a5a6d02e1bfcc2aac048f4eb9b80ee91d655 GIT binary patch literal 106935 zcmeFZ2Urwcmo8dO&N)iXC^<(7l0_saG&$#-voxaQ3<44aC4&UXQBZP@0!q#(u^SXY zp!=5p%$fiDzJJdA^W1yRz2~_zyPxXb-Br7Gt-Win^{#iVYSd@cIzVz?RYMhkph18k z@CBeY@gy~rlq_`gv{f}8r~nWM0N`Rq9TZezCfBM^b{-yr) zciqO($`(B4cVE1|c7eb91(ZBIgS?#_9DSL&ZEiCQ3knG_E8DrcGHbZo@c-Wbdpv%B z0>Jt5zsf0X%81B3wyKo(F0Q~^yu2hazM0dv3x za0J`{Zy*2&1)c&?Ks=BFWB@rp0Z=TvAnp)9NGRkPBp#9m$$`9qR6yz= zZIF+UVaPP(3uFWG4RQ+kg@%qsfJTAFfX0a?fF_Bih^C457|jgL0nH0780|S)6530& zLbNKhX0(rJqiAzz>u3jPKhObmd~_;w7IZ#zNpvN2ZFFOFdvtH~F!VU|O!PwZ8uWJb zLG)Slb@W5@OAIUwatvk+J`5QQbqswB8w@XuFpLC@9E@^|W{f_JS&R(~I0h1v5R)F0 z2U8kT1Je-G0W$zI3NsV46tfAlA9D_K2lE^Y3yTVi3rhk^9m@#I87l-U0qYf3E!GFD zX{;@*AK2K~wAeh@cd@mxt+0Kuqp-8FtFXJVXRvp$FL3a2m~ez~RB?=P+;JjsGI1(# zx^QN2_HeFnNpLxErE#@!ZE=HelW~i2+i@pxcW|%pNb$Jv?&3YhbH;m$_Y$uLuODw2 z?+hOwpA}yUUl-pAKOFxh{yY33{0;m|0y2U-1WE*^1bzfb1Z4yt2)+=U5fTz|5-JcH z6Z#S+5mpfP60QnSIi8+WBiOq?Fi8F~Ch$o2;NpMLx zNt8&eNWw^RN!myjNPdu#lM0b)le&>6kXDiolYS+`CgUViA+sfWPF6(LOSVOhPR>TI zL~cX=jJ%kV==JDB=!@vb=zlOUGN>_lGGsIKG8{5eF)A=R zGrnN#V*JKL&Lqd=#Pouxn`xh!l39`2mH8!eA2XbVo<*I-kL3-^1j`jG7pp#N1Zyqp z3L74qB%3|k3$`9MI6D)&7JDfBTlPf`Tn;lFDsRDxnNI@|{SHWVzB_T4Q`$7>y?Lw!* zcZ6+(bA>;N5Q?aZz(iU_5TZPycB1*BpT)?e3j*rwUaHD-MY(q*Xr(@yX$hy za+Y$h<<{j{^GyDIVVwyytzd>E2HzX{9GhAC%FR)s*9u zCsfE(^i^_HR#jP59aP_{!qvpoLexH}W2$Sar>ZY#Flg9lRA|8OOWc2QzfY4u^O0tb z=H`Pt54<0=YoTe~*Gkt~e#r6A{b7qXpsk^uuDznerQ@a3p^K%fqnoR{t0$xvsyFzE z{E@|@%17sq?>$a_yrj>q@2B5mKx|-YP+{;Bstiqot{Vy%J~13Mx@F{K)M|`vY+zh! z{KG`WB*SFKRLnHWbl!~1EYNJ&{Fb?^`Fjgu3oDBTOH50sWu@h{m9|x()emb8>pW|? 
zjgn24&4I0g?F-v)c6aU4?Dp*M+Narnb-3%0?(oe~-Z9hh&`HTD#|h!A?p)yf(?#2* z%mw8Nb***7akF%5b0>3maqshB^a%8r_T=%5@?7_l_R8>rduw`^`apcld|G`eK%+9| z$L$yCx8X16pX+}SU=Yv{NE+xKI2Lp#C^l#>ST(pL1S7;Iq$iXm^l9kE6U8TQ!XRPR zVLdQ5*mKxUxLSDmQ@p1xPsbw!Bhn(iKZ8DNf6nkc{P|X-TIAa(qA2gEg=m@Rf*6b# zrNW5TtX8d)6O~Oc`P~yu(RFZwtM6!5tehOBKd&+#Od}?_bX@#2`Hk9}=0eWG%p$BJ|DxSugW|yw$&!lFTcz=(H)ZZ+>*ado zeH9WF6_xar$#2o#`n~;DWm+{=tz6w&!&mdBmZ~=X9q`WY-F}^A-F&@feNTgALro(` zV_p+^Q(QBoIj9-l;@Gm@YScR2cE7EsU8cRMgTJG!lch8FJ>~o4F1)ViU8wHh?jIjK zKOBB^__)<$(X-TR*!!tZw{N0fvwvtnb)a`paj<*n?oj8j^l;mV~z(75)FkM*xZ2S4^7uPR{ zMgPUCrSN6U<+v4+mCRN8)xx#gYqjg*>+d&|H^w#}Z7yxuY#nU-Y+vs@-zD75++*6S z_$vIh^PB3oseP0EodeHbcwb)z7#K+Kci_$;-iC&|iC30oT~qFK=$&v?Dc<%P2P#>R&L(-_QWdZ+yh8FDxn~ zti`P4;p*YdtmExr@8oL7%d9LeCMftXnBfoapX(L=_6-FVQV>LS5~crP@87Hc31}?| zh=~XZi73d3h{(yws42)OC~nbFQ&7+`GIG&TuycWhU06&^OjzdM%I}jv2=VcW@QFx? ziOFb4NlD45>1Ze@s2Qngm}#gPIT-&|IM}#<7dBo&u<%NN?SF6oT7I7d?f>yfJ^-XR zXqD)B=m0AKA%&okqM`Z#CQwjJ5O;y)x6r=}1PvVn6AK#$7Y`pipos)PgP^0MVW4AT zVt_yp@&w!uV31;xu?Q(*k?UGvvwBeoKTUmu!*;LkBcym)SXmMa3nhW#tu>^$m?p%`L5M?LEDH{R4wT!y_}ZpXTNlK7U!<*xcIQ+1>m4 zZ69%Z_Wj5C&x^}nzvY4e=>H8=)oziy{GZ0*hM!u5WhwHn`Hkv!6N=|N%n7o{avm_fB+qw z0#YW$Iz>X`-YD~A_|stB8HQ+A%#Y+CJ+PdJX%)fZ1*Rk@qo~cX|S0WcL~4V@iIG! z=zP${Gi5!8R~3MFMEJ?6!+PW;7hU-JTOOi^A_-}PfQPO*?nZ1E%SC07=n9X1=jRT) z{7skadCBHz9D?wU#HE8QpKHW(e*NscB&P>3cQHgoY5T9a&td3yH`c~S;#WifMZdDv)@vY z0TT*Oj=6qR9;(({KYzSypw`;b-l#!ie3^MZGcY&`7256$Nt#Yvjswo6dB$a*Y{`=h z*Cb;GK9pkXkkNBWxt*iLYt^IrFtq9Mld($-(!6rhylRi$0`gEX%bdl70P0bfSa-I# z&0P-x7olZ6?M(Ha%FLl-?+!(0;2e*q04^T~+u)yC&mF`(A(>qNG6gWU6R{Hp-?o2y zaNbSUiI@4SX|;)?&x7Y)Rf~w3YRym|cGAYZymD)~M+yFtuZdj>JM($8V;SWhZEDHr znz0T91RlBR?Qa5XiE(=*MeZ_BF>^6er}(FhVfdX|RmgC?&US?Gg+$MZ=tDC06a@^QQQoHxbhd0*C#TX=3V&GP9ZMCxwog66X4ed!NImiL=O0&fINdk@l)q z89I`z@6kF*R$S69u2kM3@Pk^~4c^#T+Os^npSpaafooY(&pzZzI2z%t!PRwllJEdx zM>V-b)uFDVEwLnHyA_(WG)5+$8Bc0X?j;ig(Emy*+SrQ|Bc}S=;4Gcs+0P5Db${z# zQ_*A-`;lc;DQ_rbJ}X{+TYnyQNEw0x3Tt)w*Ef(jLxhOz+X$sq=a^JsQJlChv zy!kVqwG&eY!RN2>t>wIf5Y-@dyfecv&WjVHvlChQH)?z@BqxuL`Loqe=dyxj6_@yl z=1e1{1|VW}h(aeDcpffYJ?~=PZPp5A>6kswDr{YlIXH$?uR)wL+by2^4jn<692?_~ z4Oo<9v+(Nc*fA$0+-Ub4(Pee89C}Y;cs6%32Yzw1d^(ygv5S#NWjBd9G0nWAXouF!mTYjWNK<-&oso zCulLSo8%2{tcTxyh;5}>Ye&_Fcq4kG+&DkoiD_-kAl~cD|J=k>_|O5{dn+%0PZYwp z*4G^)ExojIV9YLlo-Ta~U;bd|ZfUbT)2hZQ{=sfS&XqLbmqC1$k;|f4S8Z9Q_)l&l ziNqwlw1i@xAv@;v)Ip3S=d8qP%T9Pw9pT7{%Iw}tf{e?JN1_f9($eVH7c~~^dD9Y6 zC(J~pk3X%f3ab8q#*tL1>ENmYKRNRal<{_zBk zZX|p96p^qeQGejxKuBhKUyfVkSVFUx5pOpuPdjZ%Du*Yo)}Sj-xFL~@Z4S^v4T_Qz zkvq<#U{tjeASi9TYD)MfZfi_~(lEg?1)J^lH?!KOp&I3Zx#~Fw`pqVqqncl;xf0P| zuh6Jn)o8NNou_DQJ9J@FdLWd{>g$_2>flQ%8wV!7&qe5BvB(8Wm(pnz&@s*RlN4!( zx8B1G;zie}z7?JiUHYgsxmO!LW9JWH%zpH&scy0k10WEf4^_$PZGRq26z|ybD{q+g zGB%aXi3THnG~vX-ZgMnZ;PF|W0r|BqA}JQ;+;wMqtGO}PMNqvo%Klzu-@wd+A)|nZ zkzl45M|XKH9**!?Y|hupy*BorJr+1-AtPm8p*Zj$k$L7BcWG*asVWUML0He@Cu5jv z2_J_Txs^WOPOPz0*H}-uPJ6evFdr@*!P6 zQe>X?j(4(jdFB4m^AS@~m0_DmG|8;H*t9=t&0gb4?;N(f$Co<`Vmi6;*6_1IG2S>9 zIp`=bZ?WZ(no>XQ;&$$0MGzQ^S9^HeGVK>gn;&L0h7`|rs?s`?jd8i?_CAC$T>e5 zGdiudIRO(_syfl+ktkPRmF34&HsM9GX_!F^&*ZpWkgN#4{KOCs2YPBb|MN&rBkU}S zv;?Z`!N&|0D1fq4-@|LeO?qVb3!SmLfH@~=7S$W&N_uze;vp!8r~kXj8sbP_?F+TA zJ6|08uLqmse^$R5u}%9}lI65m!2eUmTO`3D2iU(3*;;}18sF3;78x%s-;~bWrnF6R zvUg%nGmPL)dOYDzK;R&jfdX)U_RIgGmOoWR0Vp784i@{f&Z(PJ)J)ZLoU-J7VqaW1 zH4o*di4SNB6WFw@EwBy03n=o5AMw>s4-_y=0cr85c1aJ!+cW*df}EVQnLXJ(?QD)8 z4GcZuKFkXG8W-le_f21p`rubfOe22$iao4xlJO1uoh!C@9Ja{tKIdm|^XE!x%=;?> z>2y9>64aS}g;JBg4X5XAKHSH_NLW9MaLKsP457+hcYCCpC!*pM65z%98gr!I1N|M4 zBJy-V{WY>9r zph3W$_j;}QvcMrzJ>cj{Ze@wqCvlMWN2P9{+Gbc$F@xQYj&gy$fZQvwy`1Gk=66gy 
z&l5*I*%r~B4V#hxcdg^jPyi5mFa7b@3N}2BVJ+3koClVZ$am*h(=<1$Ka}Nc&f9-qPWPSoIH$gGD}!C zM1J8;{f-gx1e+cO5c~F(E-*o{yogp$Qvr_5`U)vvQRIYDAL z8xCLS+mz_XRN0{nI{r0svwf@l7f&|TeLW~wpL7j8NxOfl{!_;0#dDFOdfKjg(c5g; z*yt&9Q=u*>Ak|<2+4Bwse9esm4rTB9-93L=YQ7?oeRrb3MYJ}&BVT>`{f zSL_q?)VL>uPrEq31O*twVzxWdxHIkJG;=e?Ml*%(40lPGhC`mb{t4({m+P9s`jTlU zH~7S9s@TTQ-|SYAy~C1J4v}cXK9M-Vno^a?e<@omM|SbH{9A36M%A*(iuTp_cP`oM zDN(Ea>SlF)U+|kG(Iq*&;(#CRJEFu4F!#W>dhp55p~;6QjVPcvsLLUH`@y>>lehi% z{KF9a-d(p4T(Q^C!hJ@3C)>t_YF5=Kq*~glLdqoGNIE5!A~UwT!ew_n2HA4y)7OEO zD4^dsGpl6Z!hVjgYxcaOTW(W8Vl>+(YfpuLDeu|PUd3+s-M9VHC)aGol=CgyPs$Eg zp~nH2CY{4%0`Hp;iI?BhBAeDOG<CDW;);Y%aI_? zv)-6lxCE~)@mimD9>AJb#=CuEVo!%}RrCtV=6?z_>0y+c6Xh{EA~(5WNa*rN_c-rp z^><61MjCF}(b{o6TMd`-h`m*i(0Foqa-poZaXs01^Y-w3G$64{JF7!Tv*oQ#DGcw( zWn4~pf3YU(Cix~i#LHp#re*9(wqB{*j(jPEbz zsn3`U!P_2DY*%nA4k$>Tzb4v@LyR-^c2jg}l=Z(YZ4k?Q#EN6ExxbGcIIXXzQ^!Ui z$P{?G7Pu!Rvxa-EUAgui^t773ZD$^j{!EE~%F>R{Fc?s)Scb-8_OkuRhCoLiU5NgB#n$7Qa3Ux)l@C)Nqvapq=ES z5ZCaTjl1@Py(VuIKp^Ylw;93{&u6ZmuGjeFs%MZi6Z`AZ8~vR!__6TC$_P z3vT|(FGMs;Wcd8m>DUo(V(hMNrnSZg07_m~?l zA{qEe!9+eo*wxc26oBqO*`R-+a-fw9`KH%ApXiLc<{M8)#>+IHc*$0u2bwV$I1!Wz zfAfAG?zNQX3qtx@fs3-rgDh#}tMwc4y~B6Tq)Oc`qG2}6!yL_DXuTg;5P(ZpPBD$BZmgJ+2mq+=@Cs*GK-dD%Zf&v(gCUeH{+l7|5Zw}#- z+bIN3JT{=wop(2K#|=<`PJ8C={HiKcA@##)-4fd2>5eCUeZ}R?DYjN}M8WBu71(D3 zj~j9iBltBR!k{u4JFZiaeTr)mLxZn>|1y_BfAZB%{byC}F%_`K?pVm(Xd6$pTzH*p z-O{KIb=r13J3ccP+WFjd%Hq~%9y6i-*=M1|I+S;oIcuq2yqoRxUM!tDo+@;F@@%mrun?C)9CG8n-W9DWYC&EN=g`JNE9NIZspW zMpi68YakaQ@q69)X=4TOvP8X9KUAdcIwz}gy4zD&MGyM|oH!#!a z9CXRQ^h9?k!^RPHw$hEV3&7cC}5x*y$KGDl;0qpDVddsbR3-+T)S*&Upx>kZ=7s;l04F(nJhAT*3iVR)w;%X{e8k`F`PG0 zn&u5WKPjbfpzJ}DcR5AaUrQVsiQN zOZSM1*5^?h8J~olz3;?mD8TCCtsX*RsS^bpF@d(C=+fLGYj3RK1_g+<*tpAm9S1y( zm97uQENOuNTz(&} zq#4!MMZ*i6M-Y>}@dA`B=aG5wb`_N*Gn6Y78;_qG2`XZc0A6X>81w`_+VGA+>9=Zb zwFjeOFTMACe+HBo@?hE}sU&Lb(Jl+({?fsg%XxBGfNR@kXsALR(?2Q``nu23#O)x@ zozVqzOLlK+>yz9qGb7UrfrnJ;bvSzJ_KCJ<_eW|5R9#O?hR?*$34{iC?l1J-6042Q z>Sa_bDC1QXPqH>E@S%3#N)Ylk;YncXNFWIMcJXNQ=Fa(cppB7rq?&G1I z-82-@zRAm~|8}Yr(n@?=ydP4)vfw_Ln9S}w`Ou|&R*ta0)Hyh;gt2O39!j#Tp6Cvw z7G{k>)N{8n73h!mxVo^_w&c;ibqPg?@Z6;RBEYGoR8DM=a$E`E-`P$b`nc`K9BvKk zX+=P9h;pKLyg;w1>Fv4&IIBI>=O^D!nr~W$P(Y@+>LkxN3Mj32kaK9czXRJa3)N^w z0jhNfswG2MfV2l>;-*J>!hOa6v8u2EJS{_5n1(T@{ii<`J(BZGA%PI#gB z)vA7mVLL34u#9l(X@Z+0BPOyJVNkN^(Gr9L7Ui8#fZAzix4>k!!xhrSePH>?bPjll z1{bp$;3dNMVq`lEZnl!e_j`Xv?{L|gdlzItcPBz@;(%z41zDH%Nou1&hdyeHMIC3d z4@F}cWgx{xnZfRNraG@DF8J}|R?6Hy^)>dT*K+ z1q6j6{Xd5tht0tIiN`*h8*?%Q4uYh#UX)`EtH!!t=51YbDUv8v16l(NTN1?~)&hOe zT&BbKcSawq?cOBK;KM}dBreq}I%r)T+`ZBbzM^yWgO=k!{u~d43Dd$*9{3}qib?Gi z9@uB0+x|(cEN->SV&dGG9D93xLx+lzWjxOZ>QTcLPkvo>S2ESr9~pM=0^?b0L8n!o z_Qu$}4mA&ocW* z2UJRzjW6Q2L@bQe&flt+q{bf^*)Z8RpBlJU_$}=gzHt3%9#!ymwg1hvqn4U3Az@=O zU}T7))nFX2%fEN0gK~KJ%NBNPzds`yoWh(a&S@{({p5H?EKNnBf~azqiJ~ zp}>fcT5>2Rb%J$NAg77um2#?0YKrjvD&e{|8c+0u2$2@P)4VCnHAaw`wRr9)#h(lE zrkOJwn7?_`X>K56Lc(2g&ykBBN(nOL%5xrF4EwrYPi_jz*PPK0e-;nBkJEE5=c&h; zmiNU8rKYJ>-|NPcbqY;!`Zdr(JNstfb6*Qold+G7ap;xxLy?vf0`$R>;HZpJW)y%8F0k^|m?oaQUYro*P#%}LyEU3{}z8DCF#lYNX=xUtkte=-P{p?mKYSVMqY zOo%(k5fqRGJM6B9&C1u^>l>71Q`&`tFL`>Gk;4bz+zK;q_90O+eU>)eEJbTd-%}e>ODu z=Nqlqwzj6~ntkRrC45a19-*%xIlR3aE=i2L+Fu-MJh%P{U12e@|E7~ufsL-@l{+h! 
zmpQ{D{DkdygU0)E{LcQ(VI(;N_BY5PUZD@+H76_T=`tzmG71mKAlu2t(SvJ*u#Ma8 zx0m1Hfd5&lJUmGnu+jtmc5;YSCGWm zqhNu&@`^UK)x8u+QDA$vBzgULEJFkPEOT3`fiSxtb=;;sV4KVVS%y7FpK0kL-S=M8 ziwDhskhzIOk35-^@w3SdGb_v3=@;s!+D_gS&7t*HY6OCn8e3c!s@!PZ3GRG;C?Kxg zEU^*hs;S{x&7bZ(PUo6knrBHhQYl58IzY#U_CBH&hJG#mo6^lWZ}R_5=`vp`{-$)Z zvc^(89P9?iBL@t9@)o=lXyrhilnOOM0q^urkg;i<2OoZs1;%e=)|Os|xq|adKI(|| zp(k*?#qr&QV4M7H+0f#IgoSeFdHKa0{p0NJkt@66bELh*;hAo_zi`{Y;TCIsCnX3* zl)hK^f?%X!ydwkzBd1<%Y#-xw&6kLlDe?e*ekp0<|Y#v+YS5y~d9 zBJz>FltPku21B(#9I``*Q%4*6-O)^{8UUE7fqP=&DCYZNYBg?HIsWtwh|TjM;~9 ze_*_j%ScAlY#jyHBo~>ev1e&e)MKiwbRY4js>VThO?3LvgcswiM>2YV1v%plV!51x?&IBj(oxV z$H~08^hZlSy*_)Z8ib1=rBJ{aIdZN0eDS^fk$<<{RlqBzFQK(EM|2Crr-<&cl~*19 zXL(zcyG$U3w~LDcJl3w#t%c>143AzQ-z?ayYl^k&ZV- z+je?tzLdp>oBKC~$Fp=@4mZoz`W7$q6MPol1Dfl#WP!E!5kYW? zSdOcJU;RdA*TZZ)bjxpJh1%v88cv0!Eh{Dw8OdMd_?@71KX1BD?zb)_c8HH;UiqXK z{Ggp=0Ll893!aU)U!_wKSZx94M>?00{?tN6!|!cVI;@KA4UVydgY!r=w^Sn*jLlkN znDtjIcj?k7m0p(KA&sZPh)fTE&D5n$KZRWk%?Kqx0Wa5XT0yKm9?(UGz%b{Ua`Usj z!{PhI=hhakv={4ENDA)d3-VXRiUMHcW^F6U?OtYB)iyU3CFb99 zuWOr)wLNxi!Y0LNC2V-|+$V&&(T782T<-3Dya}VTTRkYJjTy2?BXtmsPLpOOziz9E z|Br%hHP1dGiQ;<}`3B-;56b)fyx8kKVpJn-lO%J5BeyZEq=xtI@}&Kz?sDMq#e&!Q zd#KuUby1pTahwra;qlsemU@v2kjdAMJ|L4|-;5MP0Yh&=TGYxA_Fb!AWuqW+q50QX z7&F5D&G}U;hmno?s!@Qfm23s0k+{@S%kkE`?SyLIE-`r7HRr9z#tw^JgF9v=gV{Tm zUl!9impzh^!46U&gpy?teH5sz7HDRxTWb8qmL=RaQFnn&HcAiYP?cx=yJox@bW3E- z5pmpTOPtTO@A4z9tBjL!PYGTmR1x0`8}Y( z=!d`?@V&?+m_-2&9Ea_pDXYimBU*I|4nuxRbz3&W3?j2Rhw>Yyfp3w~T1uzjy}Z4+ zgeZ9GyGHYoHUl~Q1PcQiSE%|WuhG3D%llM)+wxR8B@hK%KrP7Jx~Ho`Rhs9SR+f^R zR(oR>b`#A2y?wrlBpjSXoOL?>G-fy^1syUbtVd0X@7 zLl;3iylkn)qZYa5S8+*Z%1|XflC zh3kbK?p{%Ure&l=DULlmNl1#%fTWh{+NMCSW3?ErpIBQ1fAd?M9Fwle(J)(K$EB4!=!#B4-p}-QIax6xbkE^La+}p6uuBFZI*6ml}z~q(+iA z)Z&_4%DXps=aJJcME4IfH%~oYmVUR8(tYHe{C<+z(-EC~von2P!k8=O7856-rt;Gw zx@Q}wv(o-tj<13~OoX)_SY3o{YR04m%6dHE)hq1u=E@%$>AmwIeg=nmo79XcMwYzD zcwlgiK80q-i?uCGi2U*+dlbC z6BmEVtAZ-7*P=PMW4-0?uHJ`vbhR~x1Z9zFUVFXpkX9x5Y#9DgG3ixXMF_n9+mF!| z54DA3(ZQ}v4H^}?pl9P&?6L2;qItSggudma<^XdSB`AQE6NI|%-M@4(nebuT%CquE z2Poh^SD`$HnbwBp3Bkdw{(-`u^_TG>+&1hXp{0p}r=uCfRO-tj=?RQ3R0|vK-8N{8 zUjo}t7je$&x%C{2Ow$JOTEYp^v;c=|A5hbB-E_l_nw+oy93YuPEIfs6h+Yx@3i#aB zzlQ=w1>|A5AQ5ATNETSDHbK;a?2q6~;cxaw{>~9U$o@=Bd`bn`AK2Nf)TA@ev>|^< z>FaTYX$1TF@*o^ee4QariUO9kQGh0E?fn^7SpE06d?K*ia~XBM>oM209oEuAySi6x z16n@6u%E)-UF%l_<{quW&XX5m)7P~Eqi4HDNN`0j)wMAB+J{TRo+J2^x7;*UI|Xk_ zWkPc8Qbf=uF@_k}hIKwf@_M)RY+E|qRU6lY_+5z&kL#Emhnx4XZ^}RVo&Dt( z<>~a+U|IFY6I=cg4u|t~rfvgj7Cf{^d1o^n7I?!h8y3<2(3bk+2txUzn$M4I&LbAk zI0)9M#@pa+~t|+-XMq9-atSW>l(8KJclnN1B_^Cekk}(4w5P%-AEy| zYd!%q*c3R)9LJzG>j$+#57&mpFLaQ=xWESk)&pNbQct_5`#Tn>I^k4nC?IhP3;LH< zOn<&q%D14tD~N=BB?Q;PI$t*mpesU>%Hx4}nGWpfg9!{Np+IZ#=SF{(&!F}nka6;b z@-+_%P;iq+0T4Iv0#^XLzl@6h6SYmQ-WyHrw%M{dTI-IM%vDu&{%4UH_k}Q)Qu7#v z3ElrQ5@Kh-Y(j5=u>AN93J3#*PT?tsPsSptE*=h7eXmxG&GQDd__NOZ6baVex}D@VV|jQim<15N2t%SRfj)3MFaqkd zl6CE0;0$lS2RooCLQ-WtmH*0x0@8lMuC1G3f1NwnUnd5g=g&=&FO1E>+v9ZsQga3D zBW~^=W^Vc)=H)d(?O$5SZ#V^#BO_p!0-(hm;DP>i;?(|OTEd09|I~*2ucCqM@Lwm6 z>A&=N;7{URn7MJ;O&_A;*fPf(LNIQByvoqN2Oo?oZS>F5s7rm`XIK5&Ch_gyZ3R#X z|9R!>{$&jtcmHKD<^Ou?{QrL0|5qE5O6OH7_*iggEn})>g1A-YBA_-_RDK@-74Ltj z-W`oDpgl)U=;FzihP|}xiyt&NRJE5uTI|1?o%pYI!~7>q=U<0J&pC_+{wctphcVr{ z^2{n?NdZEa{_Oc)wc|fpQ1m#2c^kZYfXNOyJI~L0#gw~qlTSM<=RUsFvZ#RqWCw)e zzW$$LSxUzdA{#e-+8DngMk&gN**)BRL5GB(nC5T6RS;JH)zJQf9i76W%<5x`%CS`? 
zl-{RP&mL{`KhGlhTU^5AZ*d8O{|&NMxhXb&`BC9(K@{ks*&gEWm%7sw6QKjF^^z!5 zXAHBGCxqlYZojv`rH0^&`hK9nWv6~ihWnETuZ{!DPae|n5w8MwRr6Kq=8nbl8Dtsk z{2|wuvqg#XP~_yRkcVmuS+Ik)hJT_z9wEFhpuN*7l+DqlT0wF~`bX6xthZ_HAxb{e z!2x>-^l!YJO3jwIVr2zNmX=oT)VCX}CR7?JNo|^DV88g_{z@jT7H7D>rKIMlu-i*_ zq)zOcY87j>oV$7($8GPEk^DWSe{u!?2vGNv+DZatlxFQdaZUUF{1TZYkP`RR2*A7s|-t3MGX1~Zpe42RC%Zp z+&?Qsydb2!($u7DSeGr|vP!z<>z3cpSJgmSMi-k0bG|F~a~{4OFOOflh9cg%6^Abw z`Wf^RQ~*hv;e23X8f3=;U4B;rWTWJ-e9Yg1vC+S$TkSXgHEj)&Rg3}#f4jS8DB#?Q zN3Z^S!Ntud`GaocIEOR%q#MQmbi<%tL;TLtFl7fFd@Vir&+8!hn*fg+%sWS5)fBdX zEB~GRapzqyBnx`~W}yG8cpvyw7{?9#&$deA$?IoedQUj)s|(aQ6SPXqU%Jm3pz?SwM2^Ds4f{9aRhOmB0ocg!|Brw zarv5hu_f8zk|0@d9mb8QDex**W3O8jWwa5Fq+`i0c9Mp-S|zOSCD?a<|89QXHG2UQ zIpm-!lV33gGl&uvk^8|sM*hb}AT#vvBA4*}!U?bw4jX#Kgl4qy!ETXo~xXOXqtP7Z=@{f{~}!@1Ha! zNk4XmR%CA8{-*khregG=VY}PenJ@I%-x&Kkym9Rp9E@yEyO_50UG5l)URi#Q)8IAe%SFou=pafOe5sYlR^rj zb01v8^=u4fO3zk|Ep`!K8rmac{m@Tkm4=zIFVf^l8*2UDz3ce zXSq4|f&m%Ay7Rnm11=e(pU_qo6A{<6#%*MA0UNfpVsuJC51kT1Xsz>+v}M77|TI*865xKC z=fOi+sOVqkhfRyA$iB`f3Fu%ZMa>hkPlO6n&(Q0O^IX_pwo zWwybfpN5Yf8FMB0t>F?28Y%5*kxh1tF+Q%K(1V=TPEx#1qu%XF?RUFo`*h?X%R-pI zcXcCeRP+khZ0&vOt{On*pK^imbO?-pug8D!D~Lrh&!zBYZmzoc<&zkrRq&E31lUCx zOG^0dVI*;*&$@6S$tHjEq7I;D{O}frlro+hf%Q~_L@&DhsWqsZlBclQ;P(Qpkc(%< zhCiP~(|zXmI?wwdwr}AM#=r_^WxAz%LD3?vR8!)%wYX9H#Q=0AL!LV|sapz9w z7riln&F54&=)hS1kCKhdHeP8Q*Ro~a9VO_{v5Ueo^=F&XM*)X!5=bz9w>1fd7IiSz z{@6rr#SR6?lfu?qkYdDt5)3&o3)@RZ%7T2lKA`pY!6-VKw>U#k-<)@Efh+S5qnG^O z-IV`n(5SOA6P={15SfBQ08w$;<8{Bv9Ln)d@ud2Z)Vh~>BAAkkou|$&)m++1^G;;E z!petbK6xa#O#S-4ZgMlnbH%O6A-MOSWzt(|Ti0jOc~J@E{&z3` zx*Q%Ev^S+|XRzf+*fii(6&GY{D>z&Hapd&Pv>(*9)TKGvEFQFA`{Y->{$VYB3utl( za%@iF(WbE?u#DKpVNq2x{==Z^^ZCA#(=@aVte+$kVVDqB2csC2n*Tiq|1aOQL?(lb ztZ9q;6O;Gd7MOzy^_RHmUSjGqiJRP>P}8m@(RdyBl-Jk}!KUF~sor7|i{YwxRj8T~ z#5Sie$skn;&74oYRyHxvC!sxBa3N1{->f_4SBDNw|CC{y5fiSt{FCmn^^8fHSTBmqX6Nd56WgfQ3Pm}5yelJ zn7^;*fd6YEE!JyVEOF<^R{bSrXoLg-7;wJn{ps^7Zj1R{W7ej~lJNlJyzs6fEpMeV zjdvfTe6Xi?FamnbRko;I9T*{Hv;1x~QZbW;(eA^c+Uvp$jgY$FtJB0abN!Ggd2FSh zP3w8&y;GGH1H&qFY4J&G@)y`@KAU&RW{!DikLMh~2U}muRIK(>^Hx6gmdSBji*4kg zdwIlPOUm)$wmEG&!Tkuc$Aza-!!#$R$+3skZxE&F9=F_*wCj#d>n6UfXtVE?sBPW| z&vhD=IejJYTpo0zJ7RBU&rxQlTAAQ}6~rOKaXij(hDU3LmrOi)y97!so#s?cAL3NE zuo&3Vn^iN6d5RYMKH<%}f-v=odii@pubgK{@&bI=K$!!j`{R9-d!=K<|%eO^a6WCj~UnH5W>4#rQK{D%Rjm=BeyqzpDRr!oBo-36k3y1Jc7KyW`wF z?@fB*@@hLRbFXG3kpI4+ahde!bSb_5R_au+a=O!+aTD*JNVkv0w6@rGRqdhIp3;_G zNcXZ*`GNE6yrcE)r%UWPJ%J-$i>KdFKtyn07{h?Uyt63u=c~09I^_Sw-dhL7wQcL- zO(2klE-4dK&jeCLwch@Em2ri8UCjo*t!QI{6-Gf8p)_H61d(Sz^-e;e8 z?(g?r)xA~k4~kV3Yxe9l*Bo<EIe@d!ei z#0`3jB>q}D6!l1H>vb2DSRyWZX~I^JU8FhDQZBPurG@UUmos+!x7N|@!989|YwsVR zkeh*`<+kEiF!_<1j;|e}aTF4sU9KDT#Q!(|^{fuzGZ^*eru$FaLulTxNeUUgDkd%e z4*B_*EqaA^^L%?SYK7%|3o=>t=%<6$^e1<6O!()UGJf_!Rfk;L{-0C!Uw1L@%CT)M z@YilYRaD(dvOa+%HrI@fC(6A4@iKD8W}BTw&+ADtdc+Yrr}tuFRsrS@NF4sW7XO|7 zq!n+9LCgjELzC!d-Ic1mQ_aN*1xG`ZpC^-DuX<)i+x4)n`79UnZstl&9%-)T3YC99 z`8&9yrV!-G`Sl`S4$(Ju(;j9l9ku)hN(qyYctv-q5iYv3GnGsV)DZrb1EQ z*_~w*=r{#y1{YT;$eIK>W{Fif!aH{1yVm2R<$f+f7TJj203pqAOek;1*3%_G11Y@; zgoRFvJOD#rH^nQN6v{s%wEO}%OGGVBLvHUcp>nK$_h+t$7CPl%WCmhjPa z4&*nGO9lYBXb{3qIKN|D(tpOd{{C2i5>}+Ue~H!AdI6!JI@xK9!Y#@;3l5|+8UWFN zQ+WvN!}-q;iogAh{wLsWd?u>^z#_P80Qy}(84_p2jhi+w%Do?;6I|u7$G?DjB6snW z{_^LkNqCuTe(x;PE4g>qZ|*!7X%Vivj*4y2d0~d5d};WparxQNVAO5eE%XP->R6zt z{8C|km6#hcD6oH7xj(%Zyv1pAaXb10^c%2AVy*_xaO0JbXV#uNp?fFK@mZ#v$Z1vP zCfw|6(a6?zUASR0f}nPIEe$-bBkkdsZrisT+$N9&1S*d^+)tL0CyXY5SlZuH+9_94 zOFDPPoQW|sGKjC#2Fds1OpgMJ?>(BQ(c#;{f*({2zRc<9j_%lMW@)PCg9{ARI6wFD z@aMn_p%r4Vr5A#!eaJI11;}@d)1h}%dUyDpxYHz01x?gZ# z6f5hW!GRrac$wRwK^o&R&sit}!lr 
z;2m#dzP*%qcNa;LO*TxB55twoK!Yhm%HZ{0IOjb-s%Re&a&#gzkB8G^K>qbW5$>4M zTFbCML19fJR84l@gaVlQNZ}MP_4rKIOSJuG^b#QQ$DV!zuW+)9q>VGp+^w;~3V_v? ztj7gbn};c^f5?O;u~OUyUGZZpUZZdwyqMrdjp3r)`1#CP1maFXs91&h7hkLhLt^WWy^hB6k6`ah5myfE^-1MbwBHUE%+fVfE zf{WDXd+enh!Dd8UGw+g&DdGl3FkBeQ>Xb4qRU++T740JF$MF=Q+NQ6w>1w;Hh4$wC z@swUi{zV=EnFXL8M9jZJ04VwHh+5V!wlV!qFzw%eaD*=+et;;|0iyXXJ>MU>xc>J)3knhyFt{w=1 zG(>GB!Z!vf_g~~~kW|XkE>ZgdnH|8EPr$~ROc0`_kS5_1&h+~Qkky_=M3p4)LBXl% z#lT}^fLtw;A$;lGe@Beh$`_F!R_3MK5NkL9hq0V{%imav%Qu*B?r&zLpW z!quz$qVBJue|wN6R)XK##S@X7dSf^_&cbY1QkDu=l+kXbf<3t#{3VXu=zTJd)(V0| zTfxO8$KqV9w5J0|(vz#Z5y-L9ql%(g^I(Zr42yA5;|vWdwb8C{1l-~p2Yc%EA?M=Ng0sZRJGh}5LmXwm9Ba6w-dK6j zJ?;QT!%4@a?z`>L6tQWh-S)-nAU#y=SW90VJ>IFK?0_OpwaT%RaF#;b+E;GYJ;e6- z2F)t@4?Ro-UGG%iLcm7>`Cq!OVgpM{f{oa|saM+D?jHu_U@EnS zi&A;*E&AKIK2fO~cYO8y__9QMVCX*1qOq#P-3W){Rl)1yXidJedbbtX*5x8BlA{Lk z{w^K~rBq_g*XC=FIlih*}FY`xU>( zl=-|hT{9tg-q!!p^h`rb(p=`7NmS_WeF8Q8xEJ@%QOGt!kMNf3VAo6vuW+Ib?YQhR zSD&0^?-%JhL8#oBjLh}uhaBsB?VKa%Fa5edo{w0|=e%5_8zf^T8>s8>(rzPKE3|pu z-ae}kI@?*5VTn`~1k&T$^gqu()4Ov-vVqYOmoxlPt;)9E&Y!+^j?^8W^k#wupLa_h z>#`m~IUN4#W2=_NoXXqVsdC~gUUS5%gVIG30k}pkfhwmho(e=P5Edve!AG*1I_EH; zchTGCGL7SU_&Ktsuz`Gb5}i=Q9@YJ^(9NXozp~)s*`_sWG3jP!#Z50`k3oG|(sQB? z@svVeBa8!cj|O9%^$&&nAo@KowB8;fW@Pu|%+7oEJfqetgm91WZ;~GNNQoF@o`ah5 z2v%JNeVYtZ(i6Y$dK27sjvYYiH^i+Q6>E(kP087n^)4~0t!VQ63wn)**gBH9h74wC zL=4w?2M51#%?T!ia1ZxBcs%Uk4N?WhlkZb8bQz4y+)PPFpZ*g3J=Boo8T3RTuRL$k zbDV~*0d9r(E&Ij(clq45AE1P2pwe=Xezu71q6m?VSfl`Qi7eX7FMfcuisU1>S;}Io z3q3r`@Y3jQinq$tW6dD;UE4^#`-seTEzD6w%JRYf*kXYEFWX)|&GWlkwA-hUnMu zFOLr;3?KRtgc3{VrHf{JM6B|p7l1630c3Zy19ccumQ z?$NWh=8nZ`bi;eNprsb0R;P3d9@850d5trPHnWgY?@6>-;mz4T1>yaJ>!eDr1$W1d zZ;oVvi8z6rR3m$a)d6m*Gs1!oDDjAceWB8RWse5XNE|{}F5PWzm>Ss!)ArJiJG&Gp z5dy`C%01$S1L5tfqXU^#T~7L;B0_MifWY?iUJv*lgoU-bkg0G!;^mj>MqJl((Zl(f zsrk#wCwE;0oO{9z(eAqJJAxwZ(RA!>@AcjYt`;JqrAbuoXv&m+ z-gcc&z2AXqL6)=P6<=`%K67s5EUSblupUdCHDtdMx4&v^lS305HyL5csD3z`^DYXP z=-F!nW2Niw3KooWVv`I+5(gcWca0#Q&IZ^=b3v15 z&Mec{`i9K%s>~Kv)G7^&Rlr+hUGy$Bjt*aj1rghsl$wluL5GLuM0M6h-YC8TVY z?0sXBLokS6P7{6-foDcE?*Df?^bOx;Yx#WPBJ9!LD`5aWunQOBf&)I*KD?xpDcFbi7)RvkIxTRK)-;C_#u+d)H30$vGx7%o5%c$WRUhm2{1 zZ>AMco<$<80ObjdrikzFQVX-6wa4w^Z%Jz@G}SUW6N<9Re!MW$d0b(SQO$g@>^j%^ z)G_}=1`SZ&P&q;QqC_n%1Fgtr_R98 zlW{Ig-%F_2<}0u9)JK$LClH1nOF}mjvq-PIA5qxymg3V}pneXA%z~F|k)||S)}q|x zA~odejZ{T#ofPw)c=B^2la#Luc+c2x7wM#_U?4PJ0hP(cha47OTgG^k*0s)DC@(7P zQni^`qi;yOs*8k19d}aHu1JK2kQsMh?(?gFFxVe`2hmK9)MD{3$N2l1Px^p5S!eG^ z>)5(f5%6wvk@6q6j}`{z6p}by6f`b}>%7yAUfQP7tn$dslL5Po?~LzMY!Oy2KDZHe zptd67=Kv}xhm8y}Z$5{5uH}Mvl2r z&(+)9@U*yDmLs9%{#i-J*avhQ5{ddqreO6$peb_B+2 zfMA`k2iJju&X)Pl)Ud+O0a-YuF7w-N7oi6vfi+HU&k+5>E=P+;6cwdqC86ymD9+!t z?WCd%!EH#%w8%VnzJ3XwnGo2dp>f_{RL=)|m-qh^!g7mmhnxzZT>^?(?k6+Lo>u94 zuF#!puJ)C1{W3}Ftq}URZ!O_@n*vwr?=-=9b}=U0y#xb$^-yZ_BQtys2#TxkfmCtL z$Mz?2!-FO$xp(eYNWg-dQJG`jAir;dwYDY-o`o2kRbGYG6h!dL(4={vf^$BjZawF@ zE4{N&ZJ~?2k`i0SPO3Jg|+o60_&UxfU@9egpkJS#-nOe**+-QRbV7 z7U0U5)q z4EN#{c>G2;Yv*jFsbWId_!ul70x-T;^Y_Z?}1?TnDm^c>&winGBQl6Sdgl;SyeXJ*!_YAY>Z7E^nq zEgiCDjnD<^+8`N8+pYj(8g&2E~mstl|-8WLJp2$vly?N$*;}wxL<449W3E2Td zEu#sV)32E48gR&Mkw~ztbWHWA8jtjnibMhWr{N_xYf@*x_V|Ptn>F+$C48z_{mrcl zwou8!g>1VX@r22E-NU19D>wSG3P!^)$-V)_e9MSlG=KV6ped5>)Pd0z=lncThrHg$ z0BhFGiX(1VDu+r;xK5mvFy%dHUH4G>rgmrLEjY87!z3V-Ml$|9I;~7LZh*93W`1vf zHkVFQGrLi&@ch$x1y^E32eO&h!$D*Y%i%`a4t~xkd;YK6)qwz=XJ=eOdy4yoQyPp=~>O%;}zn4)Y(pTLay zRBb=8EVQt-%4ft-%|ve`nolUCpMv9(X5qY8XswI00D=0T?!%&+ac;kz)%$p>Z_OKG zFG4%*Pn1Hx=}ede)xElHOgHuRIH)VRHm)C#*R2SFFe}HufH}6JHdw)l}&evKvV? 
zC5IQ*cw}QWtqX=Ce$xn|4#4xx!2-5@ln(x+LW_WFJ1;}NlH6V8-NgOOk2)Jwtlrgk?B}3op`C#&~ z#=Ks7)LgxhL&X^HU{XR^j#ux@E4J_8v8K6}ORMg)PBqL*TXQ55%?WVc-Nm@t*iUb$ zhd=_@3_}8}01JKsaNOSX3g65#+IowF}2#E&-OolH=(b7thSwA$B+U44$Z7>%ESl8gjM>a{D~J0F2lX)tK7&CRm} z#Ldd{zzV8e1?OANJlTYnyCaB%{FPe;_2DvUCqYu$Z0L0ChIE{?utS%#5}|UW6aej} zCmuImXd|8j3_uIA>vMpDN2Q*=jbJ{E4Y+0kU!fffOc}oyt?5{d10=#UxlWrgQQgjq z&}#sZ+IQDa63%QmZzA|@lGfLD7c0Z2I_x?uN}n}@i)1Wt1L_m)a(T)Uy&mmYJW`Vr zBA{gW9xm03FP%@#adAN^s!BfT zm>|$o^j_iY4=p)L88WQ7EbemF872DMm)OxUu-T!dhW|a6^>S4afLDKqTVarn20Wl} zpj9U1^|kucO@3L`u|Oj~KUi!w>wzAg%TS1Rs zO(EiD@Uf@Zs3&#mTi?l^aAQ&xOMxL~W+MsS0dkDVnnOV^7X9J$Bh)aLjJcFtCtSsw zl<<3|i%8M@x~~icR?&h%Kn4KalLwe{uLJ~lrzW;_Uf<^N0P%n^Jme**Avq&JgR%Ym zv4`&%zIVes=|p?rgo9!};`K$2vhOh*rm0FE-31lwQwK4BAxMRj}eWwV7#a2|4u(S^Qlg-f@aCS>mo;*vML~Tez`y zAWDK|#cI{lfZli%;qW2+32MAsu zgi+H!lRLOY0p9G+#*1~b|ATjf%zny@!n`{=RxCntR%=`n>9|Un?Kc_OY+FZQke}zK zh?<2T|MUDLf1Vr2TuJlSZ}&gr=D!RemFBkM?Phx^^`*9pYw`KNiJ(NC_p6T-STkGA zNWoI?KE$7?Zst)=CG;{yPX7X6&m}gH?-VQups=Hp7SUY~M5iAbFMvbwX*!k+1Isn_ zIl}iLuz^duci`R1 z(qC_RQ$vpUycAYU=5dNMzV z`ai{7!b~lnG+V^N#gopRM~hBwe^?pA7f~m}x{8**IZ;_~!{rD-vkYG=BY0s{jXl}6<&)m~t)D^SRoq?*M0 zDwdGYa7X>;duacQ@7Mo`dl?=f-YEb zybNh%_uCo(XL9B(2tdRH#1mfxKrDBz&&JYEZyhE+db zHX+NdsUu&Kq@D377Na`#LDt1=03}NDQUAT0C#&nJpw&xh5iZdFih4| zf&hw$>}QqP@3pSo$G$(eBHoOa)8)wUxLvW|P2vl&lIWC=SF34Ap$MLCgO-x2YObT( zvbmA_gN#zqIF&Vy&XV4~HFW#h%$zfmAr&Y6z|^lyK8i1;x9({L9ttnzQ9%**1!_nC z!IEr(?#v?-{t8t+ZYn8|nhxp|4^ z0E|+=%k}1okyXGp$qQ0F^oa}n~NKR{#chKLP6frQO ztyTS6XPXCJiODvbd+DA|>2_H~gC!WOFPa~_fn^RcU6bbpYiPF8;m6{ebI?>5{Q0vzzY17P_*J5?duY^(NaH54gOf? zz11QThAIG`hj$kWarZM!)wcv1Is3yRrzGAJ?6Ua=m(n(OQM|%# zux4Q)E>RFB#4Q`_Ixjc+vZ`V`8Aa0NHJIk!lhB1>p}6YvZ*Y zK($Fc)%p|u{_j`T{gKge2jidfFJhhl62=F-$A8f`LiHb9jQsMSkbBcR7Qgx%2~P8r zk|pItVLdMgQM8pe-0m^7S}FEN#2kmJGvs;7#)-R@dDt)rFdjz^An8eo#-gw;#ZiV! 
z3YdV)`5=!bOU6^GUYFvBmS)Mp0sC4%b$bAb9e{fT0v-MKjQ}!{HB^&+zrFSy+~7mWlNd2piowq9!n92W0#t8qG^?9&_1 z#aAhCuCn^&I==1)2pLWF$htPa_|Y7JfwA!m?Pxr4jk|AbrQg`yh@JA}yQ}-0&Wnqc zrWXg6tj!t8s8g}tL$zleuGkDEJ^k)3@jlBmYVVs5L+s2k_aM7%buX!{jNkkBspA>) zJd!m1YVrz|H;?${M2VEnG>BL;?w%#81C@AcX&xs0Sr<=4M`MXh8)o_7Mrj)_W%kG= z&soQOL@6h;995KbSwFlf#EbvDGTAoK)|*(i&V9S-;m({!{w0_`^yN?FBFtY)jbr2U zA)Wg`HIgn+Xl=R!{!`ZI@44^aTFCyC|1MCtOb{Ntfv02D{Jpts9yif`n3+8zjOC5& z*Em}L#!p|pj`1}*nq-gboVU7H6LVUWokMlfj9|o1a$rUmD>ECqYOPYGsmI_Em$)*k zVz*wrFLYX5`rP~uk@rvzK{Q`^#v15$l7!lgQA2E~VBH+eVmTpoC{adi!aPp{1LSc) zlR9kMBeglp;tUpQ=KT5jOxmL*&-)!~Px!an$2|>~_ee(=W|PXH{ILmzVO89B46%{( zN{rc|XpE>Hrv-(NnrnJAb!S}D)T6-xi0S3H=}P(HhTTp^`_~!)$Wd@?Teleo^U0Pg ztcoOytomeiMNI}qVRNx6Tb=!=l5JbfYM!RE4mv(p8TPVmnG)s6X=asGlL=k7!cptb zZbEUVys{BNlQY3xsgH)OZL~OFj`T#3hb2T{J>`d^=Qc0!?sf79rt;K(l&p^0E8BYL zMyy^ztRwJJb*ea}^696s6T9x6t)X^2MX|`1kf(MzVq!>2suFj6fd1LGbSK*e-@oU) z{fv_2lH^2nKy2Z~w`jGC&XV8w^&^GL;h@A=xkZ>ZxVpB=BP5N#hdJ;4i^dw7xAqw* zn7IB;By7%QMd%kE;%}*w-r|87*cl5jOX-ZKoa_AOFztNoCdVcU@l#$qQy1XTsL7+# z*)I&2sFlb0DFsh7i{q)k==+7w8~ewS{a@ZB0U+=fz6sOA0MxT{0mj6H>UVqqIsStM z|2qTjk5n_9KUm9OZRwwG!~8GI?3ec&c$3NhDx;}x^UvvSKwSP$8Nw>u+;MSAK{hnu z^B7h*3P566GC|oVx#^jy#MkW)43NNhVY_Vd zqO+rZ3stw0`jbx6p~{0)zRve5zO5VZT6MjDa_}_D*WTSh=akiP`OA4)+@jm=TOJ60 zpo}WhOX39xeq$@bSrAA4sKNiM^G-1pu2X@``G4f35Vd7(?JsRnVRxC zkft^lTU5$O1y`z$k2n0N3>zEUT#jKGQSRA%AoA@)Ud7f6cw|%52dqfZj#v1U!v2n% z%~#YRh8%|yqXX!%iAzC*-j2&6s!F3U_U=vmv}c#NmN9qx}^5{>*(?r|gBz*QQ+Re1>@$UelpZKtJfp zh~(uG(Voq4hP>k6lFWZ&{EehOOy=}HKAzp@IdVg=;JS{Qf8e9_1n5{#0M`6p5Kn^{ z(&5Bb74x(6-vChV!9O|ekp5r@OHO}gX8)z!1k!JmMvb&%Hb~m#y0X4&l5A;ZV=A}B z#K<_&KJg3cxEBN6gME1TQ|<4071=!0(H8{9oUDx34l81x_k|7@$O%c^jiETk$Hudr zq88y#U8(tQe3AUpK9q@IHc{F9zTTiZbqzD~3)@U#bsh!uw4{qYg1-~4d{ zR9hVUcFp;Vv0E4!UP((l&x=z-@g2CjsJE^Rx$>T|0E^RY~u#lXsL?gRdeVI;0=~=c{XrJ<+6f8(q z!~1Xj1sjR=NQQ3&ZBSf@tdp?HBKh;WRHS*Bpmq$%6kVS7?sg-UbZYq@GA~UPqwTEW zMa=I;<+S6tbd~F`yl(a%g_v9SJ4#~ZU(mD*J*%rjj7|!Kpv8n}o2ssbwo#dgM6~)%EyOgG*jtD&y>-)MDpWdOduCHRXLIEsFo$xq~PX(a__r zRe`!hNCyr+MWV;26Iz^DM#X}F;c&%J0VOn0P}Kc^hIF-e@ni5rcA=i zkFzJgDWZ-l$K)K*qS2-dL%gDbiSr-^v?RrU-46PlrLZhVoY>6!i zv-qc6L%*?i)mb6-o@HOwt{dTkH3Gs!tY@FLhqLg{48{@m?w-(h8^uS@wk_Mgw4WKR zo*&R&sSY2C}@m@sL^d)JZ%v8gpLlbJbIUS zqhEK+!6V4BoLNFITcC5Pxt{KWYKwk|k(@&jK~DG7)@`+Pv`m*lMv>!MV~IlM(&rsG zbDzArH!4k8-4qt=_*P+A>)gGxyM4p8z~;zYNPjJ&maN+582x7?(>d2d7><>MK>7jt zTxe?dNCQA@|Dl>AY6zh7fH6O95xzxH`A4zT|6h|W?f{rwWSs>$k~+FDcuy}7yW<&u z@?n$0?=lz0zhO74w)}8@u>M#gOdzN7_=6#ZBnhh2tLB4|%GRYLg6!)=`;+6A;cdIRDZ?JSF+Yy8#l^F99cO7Hejye`R^HzB=qdq!yj9^Qm1!p^~%0h(22GK_8ZNHrAc#);t8x~KQK{w#G z8oX6lXjPD|ZNXz$>Caw58*3P5FZ-0<2Vh;oGMd1ZrK${^9>?YP_}eo_!WO~@z2olo zotK=9t#RU9Xg#@#88QQsC?mVeagb-rb=k)m3n(7%uVqnZ2hlTN9_S&!$!=d0`?a`e z=h5~PuGH6oY3x=|Zp}NFP!9hLGyyBHLn=e{7ypEb54>~|DR`^zwwmcUH)&ihK0e!V z+3w*=B#k!A(bv520@Ki_}kPtMNc1}qJ!*)rkdy`GR+G%pml;qQ?fpSqExS^1K$rH_-n(ccR#wO1d zaOt~RH6U-#Hz_V4^8rvmVPbS1Ztr-o4m5p|bs`_a9qF#j)g6h`@nu7Y-8(DfqlLNk zrpjyUtmO)u(J;YUf@yRAsa(##AJG4MQaJ%D`xB$R*DAx!n?2v!H~2amBnPG@sf@#U zkfY_sYz#&nuTy^S0u7u3^dOxA8C@Tg_sQ67;wBgQtz3GHBV;ml?*czQY-Y1JjD1Dl zz=n*;ib9%xBhi5@6d#@xc$WSj5=gUe-tgeCjr8Ggyi9~x*-48P@CmD_4q+E~!H8fVOz2d4?|SKH6_u+^h5p z^_)W^N$BLec+4Sf8L(dN#{!&wI0kJ zMX9!KU<&2DjE_~Lfl9dV-EbUHS6y{vEbZCrS9E84eU95p$&6-w{jJOYleH7$CD>L{ z{u4&;1&zqj{B7(!P*y=KJ<3d~v8tV`{}Am`Guo07BS++`sw(fY0t!}yU!ld@yzLtN z`S|z((G5ao7sgNFG*X$BOOdP;1L_pDx8q%Cc|hh>{YKyB_%>Pu?Uy>xa8DA%$PZ9Z zK-;M_@?Wvj`wVH5esA>Xk4riPvU#dQOIB^zSV z{VeEgPrS<1rzI~f7JImBPnnk|Kd0KFgu7+P->qt(z%+~aSpKYjB9WvYf1M4|qK;jX zNW9Ih;CMh((k?J!wSa7LK)WozI)!~}@nym>f>?i^aUx#f4x=b~!3Ic*(IIev54Yc# 
zXkSK7bb)z--@@4TGoMl_H9F^;wXdTxGuj8=onzEv%s;l*h|j5U^ZEDA$$!oWrrV4P zd`DZ4Llr54oVd@la>ViY=;eSBYe$Z5=<`oG$aYjv=}re#`Jys=9hgAvVqY(Dd;tlc zD!#_Js!eZC;)tLV_jj0$@)+kleTT($bl|d1+NmT}WF8Nun*o|kQ%U7fWsrfc^4`d7 zGr3bpSELyB1m!+u2WxU@s_RKR@7TII9j|o$v@<1^@7dV8R+iLo79s6%&pzDzOqzPO z_`9PzOQzOEOV8*o&`Dw+)~-`$Uoqnb1-^5*`UDa<2ZzIl7z0U<%U;AbK zd8J{J%14|7;Tyy|S(-jHJ)(F}Oiox*1K5M?+Le6!i8cxwasew8+jA&nbf;j74omg z+vo6@J>5eAn66v8ce7i2PwCGyy!kqb$#pib+e;+)4u#4;RupPq*sbd>UZxhEIcvvF zuQGU;rycJL-vmz{m=SO%!s=J(zzZvyMPvxgW}-ux0R9hlIVe5~JOhIHhJZ7ew*ICofLpGsF;Cr%8TL%OOqdQ5~QZYPO*-pyKeV{?)gg1wZ&Ol^K`Ei{^u&? zynQs0nVfby>A)!Ypod|4X-ne6;{l*Kzyh{NzrOp^rg!=YT{e&S zeXr)&Kq?S@KR}j0K+14nud8mep1bV?gJymK1)BX34{@l4X>Rq}rxDrvONK>)bU<$P zH-qxmn_uZajS{kT@UN5qFBp)&9wY770x<`5$PB`%ai(#?=C<>yu|D=>6~;H=owdp_ zVhp!&B8Zb42#s=c!4Ca^s*cML!z)Nk_BlUWBFm&#{$X;hIInQ^{j$oTJyU=nUFQaY zVN^L9CCcVo(7|F!X{BsfK(y%XpW@y&t&NR#96Bhg4Nkn@f*CuaXDyInGGi$4+xVU&r*-OmTX!h#v zjavBHdof+aI0+M76qaLzvAi0ha$`D3oSN&QjzdM>kM{pSz0@+o25PKS;euU46_~!TxpW^+{oL6x&>Vy5RYM8^L)o@sOl~M%i*$= zY`GHO=?RJ_2~W+R)3UY-aaYcxtR<()fq&T#8>!Ug;$|w*a#Z8p{qdM8 zCj8$Xi@4eZ{0a*0kMZ|@o4lsn`ocgg(k0M z_e#Bz5JKJ%?tO@>^-ib0j1f=-d~KBKa^q$FYIabEZUYl)pXALra| zt}@gvf>B2+JdF8k`}p|`hfA%`r`dHhv&)zRMG48vcD13?`+D!ntDK`<=LrooeEGs} zt9ND>N>{I4&O96RZ`=<^P3Eo_nfY!OQs^Ep9FfbOC{l*{mSCm4dh?JkEosa6!22Vzo=x#LjHXJ@=A}7YtJ@G zv(rg%z!smN7CRxIs2ytF&8{Vre_z9H@>+5qtMTv10aY{@AcL+Y1|*~iXZQJ3_&f(< ztVZ>t$NEP$9N&zWO|G3%j}%!`)&}ZI?Z@9!--=|8vcbI;Yuf(WZ`9{4G!?wfF;+e?4uZ$G4dUl%D>7 z^s3M}|0KbA%tG`7WCm$bT>?18WLM}BEc=i_3%Ny?!y>tKY~hu-+FVm1f=6cdZjW^5 zv+`fFZ#?6Z*;U+Jx0zoREbYL)w|XP`)iteUhO?rffepIt41X6sHb_a;6f^gBkv(Ya z3E18Tf9;A)kbWTgD41SR;dV0MAsoC<^zM2tHct`WVM8cYDL5SVvuR3lHu7Kn)F>1kY{} z@N*-wx+bOA8Lqu%>Y9kV=A=}OBLI2*!`Io}$%#oDD?~y2My46rfqSe~pZ11A%o)tL zbZzp)wA|E@x;Av1{b1TMv(C@JE!%s@*)ut8YS3~B6{znQA$42QN52Rc#@iO6KQh9= z>g#GjVIpb5V)tw(kYykjlMBW3-?W2GqWG`PSk*?fJ%Q= zdl_tGnrL~eqfp}|STQs+Iwnz?8MeFWiH3JoQ3_e%AFf8MJj{v_$fhx5<(Ue8LN;&d zxU-<99>DQ+xJ=uE4)n;1O~EJqGTh~X6eB?ng+r=HZuxL7SCoC?%Y!=CGcoRaT?;Qu zQ5io`NRw~oZ#Z{PalIPuSk>}V%I+t!wVI*FJy&kD&L;U>>zdSYybbf5 z)n?kePuu@^{B{mxBTT`2j8;X3G&==pXy(MH3_M zs$IU&bHw5`3gj>x(m9!03v_`4_T$VYAZHA}-D^1Ih|ZSlk*?-i8`0X0dVcz#{b!(AW~L zbW~}qsdJr%#|3BfNexqZUBB;Cn-c-EPA0F^G8D3TnY8AA-L^KUN}VAS9HU@h+qJh9 z0&^FUJAW6qE4Dz8AL{0RuXGN)rm`uKU#?3w@%%*2@(Fu4TomkfLO2oX^(95@@+s5z zYf~5Faf>>tZ&T9E25$p5wholblcVDzbZ^22@0jxywk&SdlqMLBZivlO96!hG5c#f6 zV|$lJ7{(e7t7~l$qWw11Fw2#37PMIxMHoykJhveG*h~fb`lEpJ&L9Z~8E&00NB+|g zyF1An(v)C7P^vS3W#yebe^**@8Lge#fY+1$v+=uEW&J?OD^iXL5&|L?j%{C+3vl;=UVLwU*yGR0(9_Ms&_zO{*sX}`u zr+o7PChmSm-Sfg4Yh%VdiDjarlyvyrN9`?qk_+D2s82o?bH)q4%XvBJIn%R{B4l|Y zr@n$3Vvxlc)cA>=c?uqNClfiI!S~((dH@<(n4MN*X!*Q>Ftw(5-E-$UwWW@t$fo`0 z34s*fB<%LIYV=#ZWSpfrHNg{DGi&}D9kGa-!97dAP>)HAEKlKKOBKQI+{dVD^VDE+73hoi-$V(z44PIC zVX46rsgIMnbq0aAl+y1>>dKM2Qhf18_~`J>dgB@w&uTDd@bi0tpOx_Lsw%z!A^U)` zmBg*x|Yz6ZTYmwSWw(iO(CHfh)nffj(SE~w;p;&ivb<* z`0Q8r@{OI&ayfR)_Oqpw1`PcUV&18SRZ|jJl4WxyL00M^iX|flb;Q|Td4iIA-fwDN z3N%vqGH+fi>rd7F?AkaHGs$7)A%(83sJb*Yq z5GWd`5P{Z6z_~HWp zj!+$_nzK2$#C-}cY0E~LLFXbX!;G6-?eW?1{Jd6=fv__HMlJ^Vf-Fxb<-Wb-QwJsO z3%?bvLi-4_FOr7FXP;Ethgs!uUt5t=zs$@fB#wLJ}G8cz>&CWl0u(X z0JS7mc0h`2S~AI!RVjAWvXe=D%$5?nsb-&ApfV!nOoqEM>}3R9fU)HonKZ3^{+}qZE48)_4}z?3f?`CrQ&qA&y&C(!@*1 zIQL|g6gr{S>XDKyKxbI>&;#2yYjjUbkeA6?9VI^>=FrOSf;KeXL$52%GyMa^&3QHV zQB7W;sNk|_Vv`>=!V7EHs?^bLAlqBfSoLFnSlbB+{mPWioOzLht4~v{FY3&;$fb&* zU8l~;j@FoJFD|cheWCq&aB{hse%xfGglfEe8I&gMZ5Uxx%nR`z=nEWCMbja@*_Vg& z%;T7to&f@zwoA#_!dk;$GG=6B+qY9H;p}H6>$dJ+C$96I0h&U(50JY4ne6rNOZWVn zibBXVZ9)XYbxP(HHFcyW@pY~A52M8fxZ>2v-Z%F^QAOKrOK+Ht)M7t*(yZL4C$8GaO?Blh 
z(h1X%lP172fTA^Qj)?ocXRDO@=EUe2LHJ6PC>5S%y;jeqP~vD{FYe<{4F5@bjr5n! z5#_(OmH5+t6aFGd&+rEcra$S`{g&j#Z@vK>_D{d{;M^lRUzxp!O5;faZd>vn&SYe2-xG=HL}8Z>z;T9G;~ z=#_MTZn9|EtT&GEL77#f%See2W~Sx6B-bZyWW^w)l+f>>$KQ4Vr|>1X?fXMuDQ?37 z0u(&8D~r@xwa2t+7rfe!x-wjMX~LdKyw63TDFh&Hr&&q^s!Bq<6?2K{^SV8zs=OiW5>P29RxJ?Ha~5i01_YZ5n7e6xkmeu7B7VtPBb?Dk0IX=D1? zJ^s!e5vDMS_&xVMu*@%8>hHz;o-ui75Hx6}`l_SOtuhDvIw!6*c8+j=c%&%Mad3}kLrSjj}VTl34i+~mf&Mm_6s{j>UniYkZ*`dnLN zGgtPZV$;r&>8WfiK)bd~fz0{%35oE-O^<8h_VBe_?U{btoSm8Eu|O=2=Oph8=MCRE zhDw02{S8dh9H$$3kAj0tC+Q|PCq5=^t&3X4diVA7(>B-9J?)A}LXRI6PMrb(e4$SQ z@%h(ZgoNDBE{;F~y##?))FTa$_}LdG%Xn*D==*VR6I>v?`0;zm*#h0w$G%yGU@g6b zvANy&XWB{`vtcSEXOWXVyYvVt2(d}4zG`63AHrr|1Ybh9{b=<=#Bk|bQ*L zjNs_+gT0|3xwPoA`LqiSKTndjuNuPO+9(ltL2_>(P^b=hEj=%yuoFQuo;FsIPQQTaH!6P2>Iw&{o`EoSejVbLqHcH?-A4yB2~ zPUd3RTxZd@9*_vz-;C@3lyCX(7}7r(PX09{-q6te^5Xz_XqL^X->yb9m+~q(+HXv3E=Ark(&PKtX~D2wFsB$Rf<(jyF}&sl5nK zNRQJtPsD3?C+K2>s#$ry+e}&w|2$qP)?$+MJVx1|&O=1DMB7W*(ZSX9MFr_I-1P#N z*;qq;2`^io^nt^mHTp1(e#5?%5)6OTK9NZ8i*hTj9nae7cn3F;nmnl^zN~vW-pDp~ zl9*gQ4&a^C)8%kalsT5HV+@3gqz^9}wo8?lo9uM3l4N>7{fndbTQbOd?u;Mro8Aqa z8^s%AEkhRsD;wK1JTzU5Fs5omh?#?1tcc-PH@dlP#N&Smd|m9gsRJO`9{|VLuZ9A1(2*7T z!D;3Ni3idIP3R`aVE)uIH4+2?l!6(d^Xaf2W(`00MFNd!1y5>0f`ZcM#!BTz>$+vrZ70wwtfMi8Gnl1 zxD@>dW8*iPjn|N&emG=1(RTXZNF{+Z=-d);pDGABwqs`>=z=_3NdxAus9%@Evg89eHSScD71w3DXc%8h< zG4q;I#6uUaELmGb_hpu8w@R5GE@wW-G={kxP||LeDs$&N?c(E&x%)&r|5eJay#Hz7 z(-ak+g?Of_b;>lyJ&(}{y>}NgdfEZ|*{AdkNkSP8O&OBsU#Kb_kmY@5$br37Q=ohJcl}TCLu;TjdTi!9BKD_%Va(Z%^E(`vaTt>rV@8;|6 zfb7B4Cy%Sj9xhdHmM`(LN4W&-P z&${>s)MTwsYO3ta3^p-!R#sKY4B)?Z?Jsq|zjQD9*RK7g?)TUJ{q4z}$94^D!TF)A z3O@O(xAjo$L_*qJG~aW6@)T;(px*P^!#jk{pOxW@PQD@;doY%~q-i-Ex)iskwz;hx z$O?rckEYb+`{`bVrxkrk%gK=k@9it*TrwHHMw)UNEK)T1_(t>PN#YZ4s0EOW>AdCN zjq=ipN?f&7l$xfD8;5O8pbw09=*IZ>Q>qPM_V(b~k#2vwU(sK>yr@%?gdfMObPw$D zY3O6)CIvEeY#+~1x>t7{acy2wmgpZKK{@#lY2suVpFGH9iL*a{iCV&8@plX1q8FVf zqPl!8H+`+pZx!lAQ_br6JJ3pG&aOtw_z3c>1k}J>$yv3cqI(?0R;cwBH}Z zibqgBC86+au*a2~fzy2=&??P}TjKRg!q_wO_H)R!;9sk_95}b%w_MR@ zuX+;uWI^Ml48SO4lEm=K$>_Um1k%rk({VG~#jGNVnku@Lczde9`^^RZKoTCqGuqG}7Hw}%H z>bsuM#q%d2B_s-p8Eg-mBvq>wZd%AjqQ3Rck=1r3?a`&l%zJ;;B&MS-A2(H#@meQO z)2fRv-MyYf_Ikj#KSv{jWGdN7`BZGkl$4Zs!<);^%OdB>H5I!#Y8#U7xCNpkGHXZX zUBOBw6TD6}i|`zYb}6=Y_^M=vI33?8o9Mn4AFTCRJePgA25-gjjFh@qk7uj)3Q1Vn z*h%doht@TLra`m3j6R2#$C%X8@j>Yt=R&p2?v`ZnYYaCw&Y_00K17qOa<@(>_M-d9 z+#1Wi!-nb0W4q7MLz)#Rc)^OzsX|;dRW&iSeWL>(aA7yOpa=aM77^d^WaTMd9(zN~ zLk25z%_7~!gjwVWQQkI9eCEr?Dzkxe`I~Jd9@)d)AU%b{&t>>RcRbkz1 z(rvS~e}Gnqg_RTqJ-zI$2BESQJWV>I#}Uy4VdZ(DVS+qO%#2I}&r}~tkOrjg18bDJ z?or^>$%>GULFoX;NnhnB$H*b^QEGaAs)izZ4{z5AMQek`M6fwhI0E>SKiUb$s6xfV z&E8<;qHF`1Z}whbz8G4TJVk0mJIR39lRAPp5KLo+Gi)yoLX9dvo^B=P@4_T;C2yTM zvMqJ=4K5Clu*`hVyk;ZrKkgpf^BmZH;g{VpXQlgrnttF$%1^>3G6b~ZoG~R9P%Hmt z5p7S^`m~#@tE#D7{GLff?1kKp2$){-n1OtF zqAc-pjSXI2as{zCj}#F$kTEon=374XvVI{oT|vMKsDyiU=nh~YP zQb}IgNX#RdPmvvJAb9)oQ%h~z_`#+uG@V>JA)tki?F=LA{S_a^Bj+?ngVBhyP!|y+ zMY|ZCrcRs~1Up2Q2xVob!QM}#7Cbb`e*O_G zz36V&eEq-~N~oo6g+H%cM+%Xf9R-!oCeeX&ye35`@8O&MA@J;YnpOLRV}ilz=2!(a zngBVy?(5f45k$K3T51A_*nI*OdPfIUJj1N)at|0xNOzj*O3Jw+qTW67TIV0d+vKgQ zURsa^+XQYV))g1B_{e`oT)|YS^)yQ6=}9bTFX(*5sPl-?KF!YLROn@n35$G>d!Es> zVQ*s5sZxdi-1T?9V&)P{cH8{w2E(I;XU7UD%nhV?uVN&KP%4Wl=4RTyV4M1OLPL{2l-OehcAjC~^o%voVPv7mAnbYR;|HBT%nGk)8+C}+=yPW`s|?MZCn z{Gp*}^}oxGPX2aL7vmR`b_F$Yj4T_0`gVrUm#^L)1WZiVb-yk)VQQ>3++?=R9@Bru z&@HV?T=$!@fzJ6{`hx4ROdY3SsbyBs{v#0Ru2b#zJ|g^J1o(B^{BPUQ|F2mRzZ>xe zcV}l^Gx$~V*2mmxcA7SuBN{7avU&f#mIOdQ@?SSY{^w5qYskFlHyiJp5X>0@zAM>> zZ9zsc-R_3l7DtsmU!W-dAj|o60*~F5dHn$rzxqLz!zcceEaw-K=(kDw<}Xw`{~OQ# 
zTcV!pHIncm>RJyyT|Ulor(hNbhl1(y-#(fzKZ$yN85`MOO~s!?J^y%B`2RTh?-so; z%ozPBdIubFo*zZ;)j-jE@%L^|O%UETob@<5jP@8K1MA4F{M*;px@>HOGD8hITS$D+ zgpykSB;UMGAb$EU%&OHh52?*;gQu&$5lf}&*eI|F85n0gnBJK;6_*-$;WBcguZ797Y z9$I!KLgkamkBe0t28M=mSx`{{7UPv;qY?}b{jM1Ca9mZYT*r@S04+f==rib^ACl-_ zgmC|73ZaSxN8*ud3;JIP%(h{~frw&fVmifQcVpy+jk4dt3iI%|*QO@4EZ1z`jFMdX zbeF$3X|2|Bl{O)a-s)?|?AwEcqkZk)^C)7u>6a5}-~K6{(*f(H|G z`R}3bxNp_Eq|-eFtHgCUSNjstfVikDo&-UtCn#f6TclOECI(G3WzaJfZ&bqm06BKg z>jMEF`L{LSOv4Uv#bXBacu#t;7TAs}dmMw+kogJAM3`l#*;wtfkA>G(jJt74t=pTk z5s*9fu}ki2Y)TLjl?R0t@LtnyCFMJG%CLH!#>wXqDI8Z%n1okPG}{6I6Hm+V7u8o0 zuLj;34$oj5H)e2((|U*~S?=gTLX9>Ln&mkaG3qMr)d0Dq`psC}XHFka;~X3f$#4ar zV}@sSSr{xYrwaH8VFc`NRx_oK#e#{?mu0%A(}k6tsVNy_By960wm~i+e~#F%^nu=d zMY1KmWhyMJq)tj(ApIeh<4=7gpgnl)>@uV`vG>jaum>f06g!uXH-L=jV%vAzoH-HK zDeJCJZzjTs3+B5&5pBE9-#rh1b{c;8rZq^=efwx;6+-^;-5hp7uu z+@bgk2~<>pnlJD)FoC*oWCo`i0c{^iZi7n9(m1WioU-}Tnvdi(t!9n@KdtKfn9n9qx<+C~=ca;L9cXa= zIzLbD#>Sm)uk0$TIr~G!>|T-jB(GKOU^WvDL<6?xx+tCqpKrlAW88=P=}hw~tPNQz zY1`CZIEPULkwcEX3N6x@c6y>0XzuQ6)~YR&9xiSZ&L581gDTFHWF|Dmx1yNC%|I!k za~QT;G8(AM^eoO9f|`m=?4)1!xD0ly(VILDjHCl|wKc@MV^Kete~~iT5NZ5g-kOuK zhOU$9m4ZVUPm&w!PuUOTmq8nA(U#-W!?cqrs~;C~^B8y!7R+p;cx@57;Iu7~w-TGP zbDtMz%+O!?Ej_l1*+?K_TL9dn^74!4)ol~YiYdJFjZ8?<8*>Cmyz6iMa}Ws6yUigT zE~~~@a6mf>pq;a>`&jS4N1HMCER}v_mipz6gB(9)j0a)S`GD&tSUL8UOgaT5$!KhJ z%}zn7Lg_DKmhxz09nc_q14px=t$)mNIkYu`=< z9u}qAt6H#LSJZYy%C*$v@(a=?xrlJNBE6T9wq7B!RZ#ZT$`#6|F81MEjCnK+Cz{hE zm&r_#jjKs`J?!KVJ6%ex(f-kbuDq4K=UA!P$$-K$zgJ=95(t#*KrJ?MgQN3>fco_H zJoHweJF4iWRjx(9N%6{MdCO!*{pd!s1v;HGK6VOr+ajX26E}@a6TGH~N11fl;TY6b zTp?r9hdHwIQqO5wjhwz$MN?6G@YeaK&E*u^`HB@! zz(O#pJop))|CMdN={M0KKf20ai3a?CWuo8tC4=;9HtA>kU&v;nveQ46PVCv@CxRVn zPA^^coIWvJSgF1*2I9{Hpo5#4x99+OSjpr)*?s8p={(>tLXL9&Y?e&EK%%HxhK$<( zP=W4<{J96~?Ku#o%)Djzv&nRZ`>)-o_dNhr@pQqWIk)EB!GN8a2CQg!R)(U)m%RiLe11WxK8)f@3+$G(>y&&tOa9p3i zw(vZ|Kso37hgUG*#fV+1{@K*rW4IqZYu~mF{ULz9D*INV6}0*nB|dd2$qJv z@pPX#0}5^Sl%xSYPgP_M#6p^R3ax#q9c|Yi)D+o`kw0YPb68cqUrmKx-;dZcBxh`_ z0ODy2Y{i84HS=l>zH$IWXtmW0l&K

    @c=uVpPo*Vy<>5GcMKM|oPbGSjPH>K8B0LC;6S%js(axcKJA<)0||ZAonz;Ss8N>~k4r zzWZKAAUpP?<_n#EiO5NPgI1V2Gyap53?9wb98mrhkkAX{?!QT_{f}dW{z?WR0ep9V zUyQQ+AK8UKR^msln<;R*DOY!?yTHI8dr^g|C#HPcXBcYvGOKRey=Pxri0g8>i>sx^ z5zu?n&HX^*(>o%hU^mx^u{e;3mTQX7gHxE3t*lbxa&tp8QWX^{FmPVuGBa9PTY6a_ z^W`Mj**?wWj+|7U?gbOoRWEenp1@Al zJg`k|d#zXH5jP?*G{Z_3+*CGj{}Cn6g!v>3>xyF0!K<(bu2)XYA(eHj4Gvv?PRKHh zhHmJ2Gq1aU-8G=3v4g~ek5Ob_^zBJ8GZGrKd+@b_TY6D^fXsXOHV~M9J%vEg#CFH>=?x&+QZ9Bm|8!2ViS6*B(DLdd4`0@;7hZeBEAb@4IR@~FFS^~fWuPAKeh(ss0Y)%yGgj*U$ zHdGG7`pWgH3%9OXX7av@n^=Fv7Ae+8Er<=TklXmed|Ey-;3)J6ez>j^_(J3P<4SZKR7Z!|`B zw+8ZnZ8kSHG5zP6^cHc3m77;7Kc9N1K&?49VI{${3DPbcVTZ+>C+Xa|Mypl~Wig0X zhSAq(E1n5jGh{ca;2u{z1Yo&GRkvVBiS?3MPa?RvR4tSBot>dAkY;qFO87j8(?F!2 zp-*29K!93EbSCnYs76SbZ{B#YMwU8?)4pstF?bjzWde#n$?!BK*IzZtK?!PBDRwte zcOM6bN;fLihs=3t$B8jI1som<$@hwL#36aghSYu&d-&DBdM2Vi6g7<3HeXjEo>Vc= zB#F8l6Ip(R7_=@*cG9XQ&a+KgnQFA*lFd;3GY#~rSLfvD#oN0gLUcAS= zD0;CkxRi;*ZAX}vZ)(Yh^l}V-rf(A_;%+gY@M6J46qa9kMO zjda*c^t7sNCp&S0!KdrO(J#uGdjK4Uw{D5V>im2gwXsgxd=1(trC~IKymCjN4Bx)F z)^}<*N-~0w!dOL@e9@9|w7 zRrt>Sa4=gbTdytne&kbX+PZG{9_$p7;CrXdr1Aa2r)gi1(@idOx``q0YA(;nWCZfU z^aG)qPz{TPfrAH_#}Ti&mg<;V4$=$u98q~lEbZ2`F)J5j;wfLEVj%c)`OEn1AOr); z>kP0do=u7!F1G#9R#07q;99|1CpISE>}-WGloKx|t*RP36$?;O_i==xc&@av$*wL^ zDAkUCA4C@de{1lbv1EnCN&5?*J(0@arbcv`>lBgLix4|{dn*)2A!d3`?A>JEo0@&_ zy4y9H#mxIk+;DJ$r0ENS%V8)S{|mdmO5#>?edsK`;xlb*PS}-h`nsA&4{q8ciS`mW z-f>dw+GRP-1mak2PWB*K2NOT$0~r$C$$;8mgX6y4Oxh#%(EVlsd+zCLbnELhd!DXj z@s;_xY${|m>i30%t>+yP*l}K-_HS=hSEI4-@@?s+MZ{Ens+J|H__ty*o;Sr{@klk0 zL<JAAb5t zGr$&Oc^`(Scw?dT(;fK+K(Vv zc2}Jb?6>x}E&!@071SiH2A`dP0IrZAAdH|y@&Ej(ss9jyu0Gvtn58>6{8-sECC5z1 zM2pkRk9329l-GkgDy0;`xy+b`x6$Y8L@8aQMUpoKe=_4qrzgtZc-u#-Am?^BD)Yw--1ABE254Lt)vBc zH2L7TpL!a=Df~K36qB5-`cF!{na@m5lx+n>b>?;0fz>fKJ=>j2W~@$W^R9T;!TZY3 z=INYM_=WbCLBJrE-cqsgudgiU*ICJ<3h=t(nOdRBq}He@_B9!)tXt89cSD`Jy2*$a zVFPISY;<4xad6%HKm$fBck%aebjC=)FU^L;qkrH5uD1QSUP@l z;B4}G6II%7PugcU-Tsz_5g?{7xTPV92XrLX!B@VJ>z4O7Ur&9F+M)!?(c3xY*~4vV z6T_QoJB2D)hI#-sw+wl@5}zj38@^jABPS0E``(lk*KOC~wzE;U(a4lh5wQ92erYoV z6eekFyBb9k`<6wMrR!xH!*;Yn`|wmE-)&VGWX%gMD_pElvn#)o#V-v^c0iT3Vn$dy z&k~1*AV@Z4l0S@gVgA9ZHWp3CL~5eZTIn4337P5gz{Iw6jKfS;I&r=gE?zXQw|*^s z=jA3c4OK12!*}QT&q~mrPwx*n5DVZiM1l4mImXUpG>@&%e_%L!D5g2yr~heWTmjc# z?=f~rOebQL*PE3iBzq9G%QPkxAaswIfnxo&<}&o7p+T?Rph$ge-BmRU7$<(h5r+do*n*JkNT3vpV9=z{ z9jb)sJ!l>^El}@r^?cl(?88CERLQ#QSoy8!GsFirsnj-wAMO`1lh8AEzyv<`QhKT> zJ0|K%mq;%S+h;LOMwARZ)Z=VYGCG&82vM}WW}!Nfr&6iQCuW#7U)&t*A=U}v$Z)>Y zvX4MCX(tvBNfMv-BJz`AKKFPkhw+)zwf}nBwYDhl;w^W5+FN1m2Sm~s$bCuf2^$+0 zmPFfSI>#cgnkDpc+jXCD*>jP>Ai{aOJr8O!polZM#&s#pcu7f~@kW=ng`utaTTN3F z7z^X;l93!$5MVc_&wnJ>!z!XV0&QdI{Gdw^}du{F``a==#WKbx|@uz zoyzb`d};+)=%iO!r=n`pbtb7Ns%Cy8C@nU6)~U(*j+e(<-VtfWZzE%|E4CT`y_o>1 z;v?!k)ONB+=Qb>!wn}TIypc=)i#wzBK;7hnvzN%Ly`VVPFi)b=W+X^~F^BH(05?Z` zGwR^yg-5#L3xw+;zO{N@?sP{%A0Xf-N>VBwp$tUEgIsd67>@<(4=_|&*+ z^N6i#Pp6195*BqP6~-fJ=_%4vZP*a&d}5L`0(^GM1V)+k@v@r7$1XSxlZQg9Wk8O=%1B?@&`$V(lny zR=rJI>|AKdtQKwki>$BPewt|ry-u8R2t1>gH(lzzg|Y=!6AOBSR!y*nB=2Kyr$Q%? 
zjOh_Xq_vJQ8iSxLe6H8pU)i6;L*}S6a1J{H7bL08Tpt#=k-{H)%eT}u!AEX{4rkBP z7dY2Nl?C7KDwpmFs<)<b+0*i;2w4o5M0R|Py&_B`H^T&K zRjEB&2HnDDBpa_~Jf99=$9SNS60do)uBory329Vjh%HMsx`Rl9Qx!gI_Br;z3ZiDW zKbA~KDq9H^^=g9wj~r=nb?;PJTa)SlW(%ZFK#fC7)R*u4p6I;EZ>P3yWz|XVrLDpk zjuM#jastb2uYM;EDsnEg=9^G};s^l%88S{^>7}0MfyN0BbD+wJva5T$=lJT&i)6_J zI5v{nZU^yXsA@9C0z}&gVN=Jr4&c~syw);Wf2M(I8pnY_ras(PewOVZgc%Vl$xIPZ z57|^T@97rk{1Mulgk6CF(vyUS27B9c*q7e1GCOa9b)6b!@B~l7HOGg5P!#ciY@D zu>4Kad|xwtC;xn6QJd>U70l_MnLtoM#eZsb1ZV+q$Ugv1aB?kltM}VA zvGCVBh?LrHhw=eqD~#IHeMpI+P`ykITVIANPT_h#CD~+E2|=k*ah}p=FuID9Y74Zb6$^ouKK~D@nB8?N*dL)Esmv9D{{2) z8bjZUF4w9Bk?E}Mt0HQdSLEK|T(Ar9FH*#5-To zG;x_wt?lV(qQW%aqJ%#Xdf$!hT#ndN!AG`a?ylHHnu?VNs?PVfyVo21q zKzYtj)uPfN*sOOL%u>tGs@21&IxeK~B#tL=3GaoZ3`fZ2JsH8j#OTT8ZTi$1!`ekyd>o55J^~$?f5iBHJOm>*!h3WzXq&Z6U%?D~2_(bZdB#6w8-R z53){!5n~(sbIquy*A~-)BD`$bE zr!-VAg1S52_|R?DK8OdVprmHvXvKXyR98mo*mRnLYMFZ#?QJ6B8K*oG+zgDr(Nx)+ z5aBkiSE<^xTZu8Y7(VL^X%c zo#yEB-UX9P@^YQ0o#E{nzp4VFuI_Dzfk0&Nl;T&pkdLUMoj`a+^cb>?4&=vz2H`$y ze}GyEZkaq8?qhwLg^V^EUv&IPs{MM6j4)8ZKCkiVA;H~I+p6qWo3`yG_M25mM*+Zd zfAcW;_NMy}kT*M!S0iD+x(J6~8Xpzye8lkKoLf?C+Ki(ylmC>J@CbQ(#nf@U#`n(- z;3>-gSo@{mO@Dy)$F=f65`SJG=g%9ekBL9}rK=3T+)?BE3&h{L`pe_tiQwKt`LnOl zzdW6a^ndA}jaq)i?74InhW&_A-lYxb86WSRQ3D8LW=@iHx9(KopnG&7dRzEm zM)GkB#YuCsvh*~cC_yp=V(uX>RGCZ{dA6}>#oc^(hsw)ncV5I3Dml_D(}TF`ixPZh5yYhTqN##}_6GZ5R&{Tva1p zsz0l`vCIDSz>fBG3yY0ZMt}Pr!98t62glfN>Xus5r+Pw)IC34YIR^;Wt&jW4NdAEe zOz!KPs>)mwLMuQm#O=?Q_1o-7dG;^9oQCDgX9r@d>~J91FMES^dwd1`15|j@dYBA7 zbe?WZWimq>yJ5DW-a}uAT%CMaWlZ04N&1Y=|Yq+9%c*VD4-kZQ8sWo)<--m(0h?4xFsyLCWa&K?j@<^Y(qJ+9dA5QZ4 zC^IiI!eDU>*iXCHZ1)c4gZjR{c{O7F4o-d?)cR%4+F@Qtzw1$R*r&}Dr@Uh#b2h2R z5yi)IE%(FJI(w9wYKI$DWN_0?HDxAR0~KFmANuP_5y zOBz8arM3m<``Av9YyxiG*r?6aBBVnv!80E0OUzMP-qu`4#e)>}C=sJo7tyo3?q794mKygbAG(<2|O&M|~Cvk2AV3 zutxO(Ix)cwAOkkLr17|&+QRK%-PD24!p25^Ai|etd`#tQe(ndSt=fYdti5B$1k+76 zuyM+=U9Uja&fG15-Szi6H{LBAY^t!Q9ZCaIj`kFUC@tph$)DXk^NTO$Ok<=;EAzi6 zRK1$CxsrU=NuuHQEfrQ+z{HW@Vdh%lWB66!9X+NzAFbEk0sJJrJn@y4?ZaZps87#g zgIpvxDA67W$*7Rd(EH@O&yB+!3#9E`tZ3q!Da$IqR&gb$nQ3?fC>vT?1i)OvRZkZ+ zO#2OI1u&g`PB#ibk%$UTmHLldj{c~?cuXirbd*(f68_0pzIZ;8ys=( zEi^;P1Lk$f;!imDs0>4?A*p0`Y1ne6%Z#bHti+doyy?d!aBWl$qWiQ*;RIjuj`5+k z`E;u!bvMBVw=o5rysq^qq>R)gA!Ofnm{x~5cQ&g7d9>#SRBg+R^x4vlt`i@emdtg! 
z(qG7KF#POyfvSGGQLri|t8%#Kpyj_7R09P8o!^>8TR<9mrnYKAZUtlabF3V~&(r)mw};c3NZt zDdq+c#+;Y;DqkwD4)bZ&wfb+J<<``Ptvx+_pf>h2R-y~EPMwA=P8L*9WTnSbVzdWD zzwU2u#{@2jv)0OL*)Vqa$?r2o#jdqI1XmiWSVV}M#YK8mY_TdO{6C5vXO^H zfrmwDzVK zORb5#Vnuf?X^<2Xx339LsaZcYsCt{eVej6u(lPUAhk>Kk+`t@H8ODcQAqr`voX-Y2 zqflwcr&{v%1np{H;TPodXH5IR4L%SH@0-i4PU(0L$Wk?Xe%&=`pHW@9R0hjsSJgHM zmT&3W(O8we9#R_yQ;M$++N#NLg0^{f$WbP1BKcPD@^d#!Yc6p%zb^DuH%8YeVCqtZ zJF;GgKTq@H^z?J_`Z34!=)TvNpvlIAu*o}ZaNP?T8+|5H5~LELtqrmM z@canYy;zpN9cPxRwBir z2}$OFj(kxja*B*jc-+382lAxNJ%>(zTAGWcA>g1H6CH1g6DLweMKmQ}%lkQf$~|X{ z#)U;z()l!7%>BmzLNyGVL6(E(OeUcYmBCIZ2KOe@K#qeIJAP+*+Ao1$^ky^JaCkui}+6xJ&Q1 zN?J@vukbHbc0lD{Sx?}a_wwE{?i z)j+Db^&`PB3iDIZ(Jd@yi*%@KpZU%F2v=0Xz5SWSk-&Tf%1Qe&R$5aagQkSbrV)n# z+Q;NX9j!I|IXD&Am&F%)`E9xK=eksl&0{R{`|cJgG)eNK@sY*d0lW(@Nzcm33JC2$iZnOH!eUlFTj+&mNKoDhT3~tVWMLHZzWPJn?!8 zOw0zCn47I1-S_TQwWm)i0AtrjAu9`j5WY}rvRr{yj;|-qf2fwp?Q{f%q z!FRFH*97*7K@*9j8H)@E3qLRWmu06|5IN7KWcR5xL7kda_3)N`%?Gy>@)=X2h1d+q zpqrdB%NGd0uS$3os-gpbfEeK9os$}_Nl8`uIn5l^q#KE9o|)FE7jO|{IKpi=zVY#1 z<`MgrB|^YuFyhn8lJ<<}U~-to_UphAy6RpDqhKS*f8ZYlZNi^i=Davc3dhVTkj1DP z^i0}^qV0ds6EKLuUKbcLY4Pxhtv{YDfa5^$J1z4tmM!W}@vzsaXgi+P)gczqZ#a4E z4w@`6$4!gkUy5m&FwNCvG7DCV#G^`}HMzcHItIDZqhiV$HGnpSnOBtvR4(I-FMZ|k z;Ug2zwIl7-m;)N%eVZW+1k^*6j|8<#wWFb(KvK}1&g0A;ZBf?aCg(9>tmx!xN`*f_ zjftklErcLuF7l@Wl%CpwwGXv5zLeS5HrZD$D{fs)!wq8g=c%*3tMFeXzVHy)LD2jE zkxqqKNOPvu_Yqh z0}I_{Pev_SBtU1qgaHBZL=F!#S6y|nHQcy)MDulSPIIPo#qc@znepSDRBnHg`4~Eu zkKKfvavu|~-m|&-k+=!RPKYowYXtY`wdU~z=H#aoNK+cuK6ynGepApVEI=*RS?gJ3 z64vlfvtdP>>C%U-nm*art~MD8iQ0!0Ol_SgWlel=8LiCKVbKwEo_YxnM-!|>vY`QD zaXu8n5eMAp_#)NMBF-h!c-diLRa!g%{dgSo769!sM)?23Y~w!^9|x-0{sy}JSH$0s zpLo9*$kwo5kzt_JUopdb=)cKh{ySvi@ADJs!^PhG0lGf?15_=m_jhF(8TKG2qi`fZ z>U?VNPZ*dsP?gnoBpbqZSk9JpEWKIdJW5k^|3RHhdAC1?A;V`BgtyYc{=yPgKK3qr zVeD)s{_dOZq)JuNG8iwmc7sp}g^BkI%a>uo+U)e)wqOWa#=QNYlLz%3ud`>tJBz9E zGufueJTF$6Aql+j2N=936>(}V_g_B1!r!+g>PI{q4O!GzF>?lpZ}wA{w8J#|Y|GyP z#5WQ^{>6vZ04qY2Vzm4+b@W9IiEVjZdG3vW6>44NxB5z!vD@PNFYyr1=vsuMdCwmw zvOJ5+UuiImlW(`pgDOtdgcK=XmmOZk-m)H*4Zkz#f^yp2*w(~K+o%Sh&aRs!R8m(Y z$ABpPF}<|fZmEd3&neEeZ)w~hRyS^QzKfyW%j||K_E;d(dN0UbkC`6yL5Ksz4NXh(}gZ3zsle2g*o20HV2VA&#YY|1=l zz=SQCK@Nqip;sYO`G_K9|4bj8a;xj*Le|oy){h=qH&EC<$MvC(@a0n{tZ_f;%V>ZI zRNEhj-7rDEyChoQssS15AHjF1@g0MI*=2l3=247wBU{0F1z<(LUi=WvK6om;oIx<3 zz6-rm<_efDb1#qtT=ith#xVeWUF}maaCbh+XP-=^c;QCpTuo+)ka)~~8(Rq3$IAgI zPqbkW-|3d-z4Qc5|V-@+wRmkCWklnnnz zbYILM25y7^|@_1D{ zMMBm7f&}8@MEb0i0Dl3EBiXJSW=8Hvv93qqpcP3y`J6N=TkODB2+dN~Sg{lcu_Maj zPuh-JvsT#y^`_Gc`+?2V3#p^FNr-MbX&6L1&Xv~VrLA;SOqg@T6VMYD$+ajP@$vpX zeIfz}nerq8{ipj`0jnF8KrH2G=sL$hv1lp0fjtAqfOm(iD&L$%J~&RlZM$jJ79EaV zhBou;6h3io&v!8|T()Ey&AzyB*>mgi5ffp>;<*-ekO@6*q>ht79(ijGpxd-nEh(Yv&XH3&GXQCZg$T|csz$h>mRI}iFy zfBgj+65X~@bmKMXkne>>UrTuwR9xN)nxD}Z}8%x#*jU~F5i^Nbna z3Mapqu2Hh~QOz(UUg}}w>oD~Ss*M>KC*U$uj31FQnVjwozvVy)e`7wLAK&N5;w&XP zL7%kap#0_dV1SFAz7;4xI9|q3Vi83?_UeF<;Eunj!pSWer4rmQ@h_DiYwvcYz*;>X z^2Mw1xyP+howDsPBe`!V^uUlPm*Y0~_C3nYN3_MG;LdtsE^x7~AprKYY%h@}t2E%Z`4EL?7$NfFRor z$C}};z~3XMo{EgSYkZKnRs{~auWK5n^^(@WPk}>>PyKW6a}^N4FQk47{EoD7DS`OylhClimH>E%V2CoF1y(*X5|vvYM!AAPd& z)VSGGEAS_wro`XLj{v*}y#T1`Gvs~tAY4Qpj&wT=a#+Ux8v*aXr;DiRwJ57H@%r%| z-_yBgBiN?(u}>tsaBvC!4|Q)D71x%o3m1U^L4&&l5AJReEChEC76L`#?jgb56I_B@ z2=49{JQQBIQ@H0_d-v|{?A_gGoO{Q)ciitsvZz5-t*XVEbH4L=9|4k7Q$01PF6Bi5 zc>P%#0IxqN1>p5{5LiWjqkfeFje9r#!7RtXw zS8tteVl*w>ak#oryUQ7hvyw@RdllBf41_;nJp?)Zy5Rrz^Z%%JOCNCTzty4&qK#zA z!CO!)Q$Yt6WWEtWIC-p>xhg0cu7%AQC8~VPQ~D#E2N%cWW0AjOm5sC@@CRWTOblr<*HN`*=lAil*`f6>KSh<75F}6{TtqLBF*WV+&tZCT< zfmveMbvdz6w~4f@)d_ON`9H50lZE0_KI1c7WMd>7xS_v3XW4yhuSk^Q$yDm&9WiXt 
zEzBPj-Cd9piuX1Cuae~@JA>ir^V={+_rnc2LjdOgr@w%yX)UL zBeZXe^!)2lNV=`^KID-d?t{bJHd@W2yw(q)#bf17da7;+RJGj zj*4}3*mMb-m62)HG<_#dG;vC15hyw@I*d0=S4UlC#7$rrifA6BitYGbk5@&DxPXP; zEG(QBDTlz!Ka?p?`HyfwgohWm?)ttnk0eg<)SJYBHk}d>Y&{3uAFmp{|C3S!Fs=F{ zq5a<-|M$&J|Ds9x)f|q*bS9&S~qJZA3)50RE zD>tA`6pz67`{^~5_8~0$z-HAKQhVcid+lCvmvaE*bGaNgjw+^pf7rlOlmx9w$lbf2 zXs)e1adr27mni7E-?QI1>lo1~c$SIoq#?hr_iFsy*K%=g_NuX|wMw2`W4E)zSh{9of+ig0ii1cJ$N4Mg_bh@lRCMVib`rR|cZsCNwXFGIcGrYBhPR@D`p~ ziYCHs0u_#FcxWJccD<~iwzV0BBiQ&(Z@Y(c=^nS95*5_7o10ga-Qshgh952Uhu23* zp?sL0x^S#Cck-u%Q66mjP|N~$R&RD(zJ^*dd0>!o8C zUw%deJyKPUNqua8Xea?kY4agF3XCC`O6#7SNfJQER~hCdMniC?gMMYaUd(>96h1q!H`)tDd<8C60R!S{dd%#T*QCEAE|y<**p zRU5uF5n)vH+LZK}Km)X4P#af53QwnU^CO#p>@DGEA0!*Vb0T3ub$C@Vis;5BZ%Rpv zqD4!*9VN*o|7T}igtdFoSQ)q!w2=^}1kEq@d3k)q`z`LSI|awHCAV`57M018J>&QY z11J-SiuVbfeTBKuKx(~l6<%Zx66q!&GJFWvx&>?v*7SVuMGVcYRGk?IQmZ||tuK1Z zrEN;C)H0gRLR+q9BaU9%55mF-VqW--f5ku`OMFVAB-Rd!@;Dn&{aX@d+B4ym9FTv3$8?0mTd3CtXNX;1#4M&vi+1al+S1+)hRbKg+;s zFSCT6O$-Yp`}EVD^majLjLlB83F(m$K#u*e+9s4$mS!XQN%|QQx$&vDvD;b?vyEv} zv)Q?`c`jsV$t~hajt-a=xmDL)VznAvD;)C8pq8_V!ByYZx6_$dl)VF^>u91M$g|GgRTyA94UW zos|~Y)?PR#yP>}lTp`)r5wDUUL>KM~i?ZMayosSBD}yZ1k^8_>0wwjSs(r%mOIC5E zO$Z4BoNwtJZiWl&^YA_p~^2FLn z>BfyuFSI|c6iomF|4a>97D1#dXB7u=)ZN)O?0${~VT;@GNFaE)zn_<4$#!;H{IChV zL5?hwLad)I6>#o&KWwV!cGE$m#)@MlTXNN&K<_V(AleihD(uRKlP|>51AHomeid8w z6k#;xnyf-?OU+@|f|11UWj>ZzKyKaNar=GkF|X%GUgLCf1qBvwGMm(TJ~JY+js+l2 z#)7hWH|gEFs}~sBrIpN<`nC#+gv)Ak-$jR<>BZM*-eiSv^P_#J#wWD4A!!pl)Hyd4 zIx5R?C5Xtqt~#mdo61S19`az{jL>4tBw0HW%2G>B;tm5hD7Ixo8x$AzY*h@UR-+z+ z8*IZlaC`1IP?y1PBk2=vqrE<59GGPxrAA9*dmL^|CutmMjMSP8qs!;c59FewM5;%0 zf_}V6aHi%&%##9(ie`~IG*9jy*W0AL;`q|4=J9K$uww@LVYxHr0~q)w#O8~^^Ox8n zRb}`xkI>L@?PJ|aBtksy){?7q8RA68_`cxEJFm;aky*1LCF061Jt^<|Z+`4BMN+C3 zZ7;1fFUdb)h79-Cs*PFyD&qSpie17`z0xT6o%fAt`5 z4s2sEyX~ZfgYxm#ICn==Ao2S2Iv!`f_q@)+9gmAh!;7V#S)gx7QI?k64A$ro(!o{B z8Xvdsd=)CkmIj~<1^+&F_RoG-I)5%bewVL5hy<_=zz2NU=!VU(NcflD85&Dc(^vS( z!pONtLW#>hZy>(8>L7`UiqbWY1F}eR^2T6KY5r_4q(Cnh2I$hQV-j5C5|PGnn z+^8Zf_9ELW9Vao~z} zoIn|+gA=y0M$lRbNH!8ac^gASw;7-b!hXZ06&eM>-BSxzgH+*7ObAh3u%}I>9 z88KJUelyqX*z0ciB#&)yM>FItOeC=W;nD zi>o=U!DD{Yx8kESz7nqRvT#02!K#Ub2EgRIeI_+?GCqH>Qunr2n3*MQ+`0*jm4%<& zl4UFQX=$UEw7@apOe5GD|)q4(dIlHet49+XMl9?h{ME;IHv|gJJpX+-o z)+uG|0gT1aE@1gnu^4G+$#>3iJ5-)xO|d?2SeOqPd}#gpN*b!3U>w~k>M!Ln6X~Ty znYKdW?9Fd2o`s?EV=zP|3aZY3)-x@QV`I)pOzjG*Q($=s%*J6oK3roTu529%bJzJ@ z3SvdJ=)>%xKSAuLWM6s0SCjk4Y@C%y^|yR58dD%?Ro}eYzu@w|!ib3Nkz$RZWi?_) zwjcN7YeDnvn*xLS>{&ODh+xp1y zBq_=RZrw94YjoJER(Z9!Xl1|aGt3-MW`9PIj^S#MzO|ddmpEiL2a7W!o++OfDZr z*r2@{u@o+iHpn>SX+g3*>1Qv_-J2_S7j>6!Z*Ln_bLDH{FH*V$KWuclN%5{*B_8o^ z%ph|b)EqD2ORH;$+|#u|kltF8qwgfQ5z&=MO2tSQD)9{?`nIFX5x_^%QBv!orY~E7 zDmmW!ZVyD4Y6g#e_u!&e;>zE5?&5w<%M)fxU`dJhO>bD^$7drm&X)xF(cw1G+<4<> z{d9i9&eW9Xs~NZL68kqlL6D-T(DYrE%WrIk?aom zBwX|H5oRnih`eYE7T8JxVVyn?23>-`Ceksfj`g!I)(;v>qR?qcz(?=jWSrjt-LWat z9$kyjTO%`?xDK$TJ6TrbnQe(#JoCYjLV_lfYg`rQCd!YsTkX(u3Zjna9r}nh7d85R zmlN`V3g{#5SzrS$BqWrH6}5sA(pq(D@xAQJf^(kfz`Vn#b?IL)U{?&POQJ4)*Zn>T^yK~*}fICa&$x%?(*OSoW31DE_3FQpu$y`gGW8c;Ng4ng-1)Dc3E=) zk(N6YMn;RMPogWWyjzvjzAo8@G<0S@5lj#xG3NF2sxC>b`=(0^mZS zJr0ikBG6oqY|NZyA$Ur3&LJrpvyhexb3jdJ=n)yS{?=`s z5Ql%hcYP^j?XBqsVjWTv5{88>O7fvN;XApUOI7rwDD&z3`ve|}@NP13$%oP|S8i_} z39@=v;3Gsqj_~%(#LH6-*?`~fGb7vN}ygnE#!?Im3V4vL$HC$E!u;z^$Of$2n-7o8)HvpiU5)A<<<_=DUWn;sS+yLz%mB z4k*J@Q31DT(MSUIPH@28iGy}J%5<}<(j;rO9+NYsxOrVoodUcs4Ry<7*&nqXCE87N zMhF)5*bVpMMMIc+Yh}~Nmp7)hv5PxfA}rFSXzOt5rQ;gG9a)kRjsVh)&v}3T+SMP+XN9Dm1VgX%`friySu#?y^s!>mH1Zk7BBGlCY%q-?cWaB&Tix|?3p`8%=Sou9FM&?j$S|q!IT;6?sYWkiElp> 
zqiC30xb_IdMKvgLV?A^*LOsa+3TLQTsbeQRXvek8`7HNiAF1%cfS(98R#aU0Ci*A= z+YHdy4RPqUeEgzd?}_xUh`}-15 z8MzM)uHS``p;CG7lk8yUdKlf(P8%&HofzHIG~N6HnS-a^A#&W~>~b6qUX}j@;mG{+ zdDTpr7_FBwC9SO-8?CoeR7X)dzCK37zL%ks*?!!>{*?0D{cUS9AZ5k(?GQ7+yYl4& zbgnF_p&(#x`RFltb9@g5#G@)eOY1D@V*M6S!3LMUVnWQQ7%x4rVb6|9WyOUUe0d

    ygZM}u(WQeP zRPbTq?skjQ2JSe-ZgI*4e+e`QrzMN2O zo@(N8&)pppV?67jrUgBYrt)6{5!JzM-<;+DoaYEII_Pc}#uHhxDU zYs=1y=o95m85pzM*B^Zpy2O1>=gT9;`*sHsDTrN*q3cyM$qZSR){PtKJsV6(Ph3TM z26E{9gv5p+!WXI8@#+Q&W({9Xr`HGao5tlB*=F>3Mp~#YE#xMv>>>3AA~L{jDmfP;#FBT`(g*Vr0ZW8e53-4< zHK-6&z5MDzBd0>EWC!XcoZ~USQ2JSjS=YFd9tKhNY6d`T zZ1yo6bC42fKZV;W)7eSd*ZWwXZoMlsWOmwlRwm`NQmM71#2G)eN_V%olpd7fHoh~X z{6S{o^bzUXsRRZt9wytgk9IxUIFw$)?e7^enpPXd>E9)K5R^q&n&}OyHyCBu_!!Y< zB&Y-2Y!=p_E_X+_dp`bqaOh`6PT zdDcuXh10#k1x=IUPb`xe(p+l7JUxn?8p2=43_JsJkKcS1#&xkcFf*kOvJWbk2orUu zZ%J=Jw;b!}P!iuZFyMBPIHaH1e`&V_+YG1=+L6-I)QFaFGVtkF zJkMN3ED*gr&wxn;lTKAGSz_ku=y89klqF?}qW?P7xTb7IonwvmNp=2gnAzuy_sc$U zzGaV#Po!9faJRuM-I?Oj`-dYD=C1H(r=HPfHPOcJdsgp*TFV55wmdb!sK_A_!J>=> zjkfYE{iPHxetXeXe$hyumL6jc+J`&y0nL#Pp%UP~-{$xaKmY)o9WZjZ0W>igN{k1G zOBa{h*D7$b4x)yOS!YZd4&h_gc4iy(1)TYvS$6{I=~0l`!FiU(2d9;%>{_o#mn>zJ zHxzojb^e|AVa)ptSyt?=Ewq|d6di*nuw~7b9JV-buK)2{!%kt`Fr>ZmJQFZDLe9^f~(zq_}N<*Frz>oou_FY=!c zM+#SV#~?h>2J{P$kpi)H;7^4_%^;g}o7FO9mr9x+RL1!ZQFWy$(cNx;m>id?c~H!y zT*MG!kryNUaBwfQtsV_-;4ih)Q-sJXB_6!6E+kHvMsoBcU7~*^P0ankZM!|If5a;H zsg#h6c7JhA)eo|!`phZl6u+ZZ?IP<@!k#7+=6)3Oab!F;n^V&)MoZ+YL@+KlItSl& z#XAot_?PcP$1*fQGSbr0nj{#gs;JaQ;NZ#qTVpu#Il6p=*r~VhqQQ^_F1tV(di1?f zawix9%^~|Li^8`}IQ@NgP6}Akp7_&+()U9O{FuXI<(6If5*ZV}k2B{r4UoB8E9mJ8 z)3etw{Zb1t{aOcU<`N8<0h+3RmP&v5H9kVpuZ2g>FIAatyfX4H6GUNfO_G zR%R0aEXoK9r9l7n=|-0-T7-^j}Vy!JE9IXM*sQlq<`ug0x5dGe5Sut zih2?3i=(#jBd(hw!D3unIn&i%AI~K6FEt=a_c}oTsP$a8SrW(n(T=auQBI$d+O=2`GLY1 z`H1+yL+Oc_()K+88-d+N%QEd^n(c`QrsHm%zkVPUGsEW|856fm$9R+X87rz|lH?MB zO_+sS_uNxnFI;)CEq%{h>1ix6I!DqN4BE3bsF+Xu-Af0t(YAEoR@fKo=nnWe za%-_|d`-C5bjV>vSVo&K@LgZMy;epcTPa7h@&Hz9qsI%wh)|=u@_yOm7n~>ga!GdY z5@ZINmeau;%126JO z-v^h0hx$?7hC*rP4evp<)pzp&IgU4vOcuU|I9@AGb4jKKw>|eg*Z(ecE>U%rVQGmcZW~u?)At3K7%hYVF^Dn> zHk_ypni1v;u)4l&_;Xa;lj<3LjyE1EmL|Vf-~##G4HRkfT_gFSlrsrRI$6%-fl0 zw2KAFr*mHVmb*pCf8RH5s1?wK6wA1?b;V|IOUvSa`x-L)ZLEU+EBJdsCWaEhr4=^r zJ1t$kH?Ou^pfWwUW*ht?z0?+g)E1W1&SiSQ#p*1z=x7*%2Ih;E#K@T)G1a>3T=2#S zGI}v_eH7arW6H$lnH0r?9b$kTcPSCw)~lNmboG$Aq>**LFljW^4ZR6}s)>d8#?_QB zG6k-sHC1}x9AZ4a$5sxph@F0lgA|i>2YdQ)WDo5_Ld=oeZ8K>o=wu1k4Y5n@vi2S~ z1AS2sIyAo?B*Bi_Y%M?D!Z5xqyWaE*Db+Q5}$A+|Qaj!&}4 z9z*m1!^3q~FJ2iX!DzdZ(ys1(oDf?jQJr~Vm#lkV6KEYseCFum`* z3D*i^r%gYX4U-NYI;Q(pHp4EdPNv30;L0ItiYOi+1(I50EWDHGi27s9;UlP?Bi*nZ z`vy%+dTIzd>FIsy8aYO`UVBityn2&AUiI%Rz5GN^dUc)nfw&ge*rCO@MLbO(F5Xu5 zN{WHUi~mjXoLxHC5#xt4TXru67~6AL@91n(h;byhX)u$zX29&H5*_Fx#;;`uO~sny z5HCHr)#|J1GJfc+?sH>5NJO4S&}%V&5z|J~8Cf=;(PUQ>lXgUnuDh{&cZu!tQ)RN}H+C|7x-oyk$;T=9HZ+eL>;|166TBV29UV~cP?U}B^Wklu{s-3V6SZM#*Wz8E!kzt zx^4&O{T75Hz<>hl166v8EeDsqeN2!+=Y>6a_E%`L`X+`R!BS}IyN+#ki|jG)O(`8F znU`hf-@hL%1?`LS<>sYW4L@NuOV(WOX91B^#Nr!Ef?Oy@(%szGxvQqcM*GGwtD!+sZ8&lP$0Z@>j_}>$<}%~?5x(VUP!<+zz+tL z-n81^<6|w0dFI}gQ;mt5uBO>))Qo3Nx>OHk0dl5%E0E3-GT+D)(yHhx!5%?^6>jEu z9NHiDHF2R3Y%!q8x(z2B(xR_ zI@E0_58+k7Qwq|Uv`G{Z33?gmq?JbAP!$sO)QGw$7W)-wBD+X;rvY-1+|XS;WNQK^ z3Fuj__^6-Asz>Isd@#cGA=RWp3nt;eI4wUl>4?PLjS;n`e{>u|f-IVv8mw1qn+tNP zQs+-nHq!0%v|2R#UG0T-AO|uRq{fN%4Be<+!r#)(OMtQA_UsO~RE}43RJ~XqND=+= zE@Q(?L+~x4|2x1ZuL^(Qt?tofoEq^f0cB8cNZf(V+(rHN zO!AHoHF$945Z%xK*}Sgqg{75BvHP<*7+XkDrm4 ztBMI=;LTm?F6mT#FNJtLW6UUcw$aPl_%UUQ&N<}b4c@1Y)QnF*K}15C=ohW<9RQ1_ z+F%0AB%;)RQ^E2`9o~Ah-lANEvbL^HNh});eMiCk7va?j00;lYr9>H7B2-i*|59El zN@dbCSv$8*&son|+Iy&MIr@;ehgNJi6;AYi-^(g7oo&p|3Q)e&~Ct=_5Yr`~9r-@nJ>Wox@%B!J&wc+1^*>}PQ zEiw6lK>t@mAniaNcdrZH|CVkUVHAYL67et2nZ}u5mlm4wH`CU!vdi@+{H*Po&(<4X zIuN8fzgYC+{VM_Ve<9?@I#vcBa9+W|kr$~zE6OyBRRSUCCk64bX?6Kf*H6KcWqCgl z^~+nV8_~XRgA!AvMaq$&_wj=7X<8blmyZ_Tuf5*uG9ZzRWoWbIMnN)tCn>+*zsLCu 
zrPzNFT{{u|-8gZah-t)g>VCRq5NK`VTWfWC)WV0E;p;16qdBDo+r-rBS{;|xBsB?O z!T!aY!+cu;epBE(<9nCieM*oZ zE%j>XX17V<3U$8fxeb&hStW=0cs-&lNy{{N!V5XpZETFdY^-zx96(oy^#SASzUk99 z6#lVG?#}>IgTfgq3wOToMJGYJLRDh3$G)_aS}$^q^o^tsMU6qUrUh?Gu%7ieevi`a ze3T~I-b3J26elMouE%Dxu$ZsOgURf3?CLLP&}?G_2%ei({U_*{%}(GRA6`?DZvegYzlWZwBJ#!CJ=7wS zRnPNvwBhV;7P7{i?Il=uK230!3A-iXZQ-r;A6yI4Psqh=KVI+w=LE*Fy}ucm9T^*M zIQo=kPNK^^qp7KD1C}l;6=839_hPvf{j#~Vr!~_+PX|39_Mv5;s}qL6tb&s(s&T@rBlXGOd9dfutx7D)XNG+q=cI8!b5q1T(ElGjZ@6F^z1t zej+sZK|w!VI*Oy}jM!`WLYCuRZw~y01_le}GR1Sfb0G+z)_jkgkI}G^4|X(Zyxq#y zz~xu*e-oyC&4qG^wpF5fAj0C*>YTX!Cc=hJ5hoW7@#~eA|19nM)55Kid~u%;-vGK7 zNdhmM34pS<00*Y4Fd9$EQ@iJ5;~Ky#y-X#6>A(@tfk_0^A>qt-|- znN=`_XV=k-{M@*9>SHLQLd_n%c8sbH8EaRpHau>Nu-fa%3dB_&eqLL;q7*Kn;ah zaO>Gd>8)Yyx9=q-drn|&Ym+I1b}xpeLx(ta#vs~CQG%RrZYW(o^v#&`8-?Zt zMM_U}D6(b0%+J-^=^D%(yF;-=Qhh_jDM!0d_ezp^y>0it(d-}R$T2wD$P+B2Hkr>> zq$?pEdz-3^K_yW2w@B?QKl8dQ z(`Hl@!>I-9Nb35MO6_Pg*!YGnX~vqp?zKcrjXO6pVH-kq*s)5;QN_GFePHt zJ9JlgV8mmhaU3z&*xC}A#bwV)?%%EMcObiqlEG^4Ec`r=bGeCl#v zQv^`NO$3J9)<80ltQ}vxYV(%nN!BE#kSfpIi52xwTw4vlfX|Gs@pz+49d8gLH^o(Y z%R^qSlIJIebXoR?E#%SR#We59o-)6v`uKn+N)i!9bWU{35&;0a8CQVP*Xkz- z5Kxxh?{9v-uid=40kD1Mp{c6#BQGBU_+kuNk({|%QcT_?_ESHi-Bx7nF#qRv(uYl8 z#haClM(oZ(G29XCDl1j1?E3>8)@P;*_%RTS9=;Wq2M7}UcCuwtUP5=d%bva%JEr|H zRmEU;p`~>Ne9P(<8HAcXzh!D}cF$Boo_H?Ab|tV`9`7mtqA07KnA7yL>1J7y31x>WLhrdc! zd{p)%N9+bU8gc;108ikGF)*b^L*LMOLl|pnL*;R57W_0=P4$C9nwlO8YV^{E0<9Z2 z3kCRnkd;YB|HAr$&LV2(S2=uKw3;61z%yEwKBUv`7qIdNTh~!RpN1)CQT$Xe^Ut#p*(!dQz|TGv0dA+q^Bbfv;>GU5n3 z4Y4W5CAMWNBYkQb5Q!E|Da2p#R@p%-)7$A`tYx|xm zbUPd30g_rR>7Ssl8cFvdk<|r%NZeU6;AJu8ySnO{E#%GjqYd&^G3zI9Yln>KR8z7w zO;5N}onU^#Xe`~L^4Yh~kIGW+@(V8`r6Y1>Rh~4px>L+CB61Z)CCIHsrq`i-7bnRp z@_LH7QQ>UwEOz=$|7im^ZwN`3eF6K>s~AdvXDmSZ<+ttb5-dO5)_PWyCsU5wFrG-u z5n4kRNv9Pzc*VWrQvRuzB>=I&Gy?mhZe&)Wo6Ux|^ai+IOD%y=K7`T&jm9tFVZUHM zBjqW=JfJ2+ST{nH*bo3TtP(elhQ+|9kLTXh_k0!5Nb~Jf{S8o&!Ut&zgo?hP3aSG9 zNgm?PzwQxn>jx3`$bapl*J=@o790U99vnbBpz?$LNXlJ}fK4)m_?Ngg#8@|i!FM$t zzG1dGaI+RK2R()L6fL`>k{DDdP56Vf9FgE&R&Sov9l2XBpWD`lQ=c_b#vg;z_1xJ@ zetfSg))pN$-ODgJv(<4;^v6#Sfr#$=ZD~`kQ5o_JOBj zEQK|!5EyGVKUbbIW|ktdw1`RN{@IqHEyD(njVr)90iC4HqRH^N9ABdocg4k>kTaLF zdfWGHYpfL~Nd zB;DQhTIf3;!>6lQ4>McB`w=9bcCy%YPKc&g&&iMlm9xt#9$Rhm!k`Cb2_x<+I}`fj zkxA7PN=71tOeKR65YsyT>F>>5p+hAogMGV^#OYVCG;p!#jJ%~-W!uP?GMHlRRB7Z;0v5fA=$|1tkf!gQwgpb3E?m5<-PA-(*O`4fb0DoB1onwMl;sx6;qW$%7o zxpzW458pIpDQ6n!+)#GoMSxv1l&_rRfg6^B&j^5}xX|1c^P8vuHk}kKmZORGIKJh1 zxLpxf^drQaOM&uj{%gleAItL{0o{)$FOd7O)*io?cST&@~uTG?llC1D=Xa9#cwzEf4OygKm@zD~|bs+1GDG(_KFY8`3Gp z42-4g^wLTIo>#xEMCSrMM%P#XyZK^Y;_3llrwBU6{=l_^>uc>Ds1ZhGveaZD0f$Ge z*m|6{R_^`;otzM@0XbFf7j2_g00uM{bw58-^c~q~7Wn&KU4P=}IwQnXRWX^`cU$rSq=+i$&e>1zD4O7ZJdr&67iPD_5+jcf}q&cq85l?D@ESfUy#xa_;SW?_+Ws1l-1@uUKA~ zW>kc!fy1&O+;m|K46Mi`-{zyDFGh3q|Ma52ED(O&_rH99b?*!#fK&6S73c``6#qsg z{nKmzbV7iRuy+jvklc1(Vc+w~{BpH_XRiKo#i@R|+P`sE|IeJ!^Alf%?bAdUf4KB| z8zQ}(gTz$iRFtiwqs3=x5rNyrnnFN}i1cBH$3WqqGDaN_inA||dGTavT@pSa_Yp&z zud&jrJ0tTI9kLh9pO@&Z#Z@l$7mF}{-|8GtQ8OHcKFb|SxYx~YuE{De7^t|*=X$lgJXA}+ZwXX{jd)1;;BIGpZ;(EA|$D6UZv#YvS@?UY6>J%2veA!`=DmZ?| zw>h6oki%a%p~%j=k-c`dk?`5L)OlL9)E=GX0V?(5XFC#8n$A^P=N=!l=LQN{Fz}{% zjt?o+&(BMv_R}Q#PHsmbpTytVrbSSe<|ijy1E`ewP%s%B_ZB3D$k{-|JAJlU1^*E!*@W8wl^@PA}fXiYwunjV*K zNEZl(%oiam4RPHfwl>>nu>B)hFcU5DJNWG)EkPHUBm(UL^7G|4zQtliO1}&(1E2

    ua*bhA~>S65BML)$lQf}3fq$I^d=;^i?l`pXyp7i!dh zF&F6tSM$!D*Q>tXwE2!KY;j2ht#%o`Te1jAO|yVUw(u4?4|J0yuP*k2(AX ziaf9!*6es@D0WGkntFMQ(VwRYjW;SNT6R~}>X*8nh;Y^kzB-Q25hJyaEOeRNtNGiP z;Q!lU<8;pRCy3Kfr;F}*Hfe(y`PK4~j*@g~hKTkJz?P(usB*8Hjj4Sr{1a5No8htF z`IfzjJXiXQfYq3z0{h6|fzYn8cWOzIw86oou1^x%e#TAb?L9u^_XC0IG{ksQqrsmy zc+>GIQY4nddXDRPaRmv6>{|pgrrfqIGV;j$X6NyeVwQpt;nqlY5xv&J@a;X&1%XNlS_` zgss?;Y#Gv5ryDpJ=qWnKisaV03$UBU#P7wd6Jr&gu-I%8_b6EGETL5wLkzu@+8dI$ zcb$%t!klW}(mRx|(@K`s3m^tS>p)l@(>^Jyt)!Q}UEeJ!d2r(E5+5eyKW<-_VNe^9 zd-8xg$3;6eDn7h$7^HI<=2@0OZmG)#1cMk;oHc)u**hL2Wl^waH8fUtLRzKLcP{dw zj)Gx#*2AWJYrahG@j75Y`<-eY-B2qhJhS3~8jpZi(k? z5VE5vriM>F`5TJpZzyG^bKwjLaIGE{Jxgd_3fK}t(H32$`us81L!?NbF_9IGlmLoR zB$yw6lPUbciS*I*XK!=D!3Bp&1q8@985^{h2a4)8#v6W3ruEw&GR&7Vf_2m0Pab+H zI9j+m1Ugk=I|S!RVc{IX-Cyz$Fc&;u4L>c3BKZN`_TfD$`|JfD1Y6{9^Izb;CV1io ztBavXV-TiCbd~wTC!xl(Bq<5!0!FD*rT?i)M`#EL+!{WufbzMG#9qO_Blkq_)7LW zi;Y6tic1*?F}(^QKl@Ux2TBwSv7w)#pE~W;p>2>4p_{LlBT?+9)M#^7`mEJK(lB$f0TjVWB3j*uVYZJr+PxHNJ<~ zCzmRJJE&JFBk5V(1Fw|`Ap7G8a3}v;Pm=Uk7_Rbj9NpbloRhc+y1Hfh`*tJD`X^|S zYV#=Pk7MDxOw>OQ=)_P^p-1Ii!oBgW#1KH-GM?7ugltC83PHbML;`hkqDAw&e`mDh zz8RS_NekEd(6Ychn5J(1gDYxDXY*1E+RXf0uU+Cc!Vn$6aDb_AY+7Rr{~zeaAFeuo zfBfITM6{k{zPk?0En9F4>f*w8V{j{F%X`@L?F;v}RAvHYyDtwO4LXzX@twHBTJd^U z`re0*v^Na~(&xN2iu3wp-XjSqJWj74ndr4AWmPG$l0(gU_SV80AyOxV?j|m|Teqs_ z!k8n@_XvZac+}PE2Q?A}$fsg+(cbB^5w^cLhdWG@cAIvB{ zQF5@aD<6FEHNHT|d=3dm8fvfYlX8+{cUO8W}u z6<4^b;x=8ZQa@s93Z!Okp$2~@f?hS2$yrRpkNRTEa#EuzxpIoz0%e-*Fl>$ad9;ZWaS zHrxo%b#$YDGr#)n-~Vf_m%nlC9iVA64krpg^2PLQ;Tz|Dus=gDWb)n>3tK@ zv{$q#5y~Y2oMiQ$&W+obaQo|^F#`txLGopW1mg_R%{D)haBg+skG21C*0;&#r306E z@y(m&=2?nz<9ixeTMRXQ4DZzqUiw(muYPC^(K5qz8_kLU-*pu6lFG1l<_p|6Y)z}x zDQ+Ol_cL?fRd^#l4JVSRRnY;l?F#gvYBgM`8-nIsJHxBPXJ*|ujG^}h`b^^)eL*aJ zC%c_;?XtAkb6|&VpLJOYXhR^?R5(BQlfLK(83E41Jvszkft=x6>kHZ}vJb+sl?j_o z{>-oZ{jd5*#%#ggA(?-SUFv3FoB~Zwzn>s3sia$v{9E*^`-3=<;T@pixNSA=pNK1E0NOw0w4I#~d!XV+h{O$eQ zTiu?0_Va%BdC&8_&-sHHhFPq8t$W>heXs9zi7*Kh7>gXL_IJ=!6$fiPd?W?!Oyt+> z@mX2de%l9*>mBd!g3eYo;S3Hc$%2n%xjiJNHb%eD*ASV=%9(QTkrg?%k>TW%-@1|# zUrS^khSxFrdRS4oRvdUlHsDUYwH_CCk*aDcpP;I4t>^W)RkAtQ^`?rGpj#(SY#2#6 zM0~t-OwxnKH11QhEDc`V923yt6`lf0L#RgaK=8ORwSU4&x$z^1((t|B z$Yqc@&;lTzY{YPN0B}2@^~y0CC<|X$+yu~g16;|zd=$Mx(fIu5y!E?xq@*H9&q2mt zU~$V`waTB2*Zxv>GQkD}C?>%8f5eI@xm7{kw zBdTn{$SR~q${J>*|G2|QvYfz|| zYaBQ}u$Wj@`_Ww$r$udyqmwnIiUON~97NQ%Mc5Y}eVMTiblD!p2Jw3x5v31h?J`Yh z{Q;H16F{QinqkI=`zq-w)a}%v`l@XBYEkXr_%bqM7aJbAmFwPEns1YBRVR?UYZo|8 z8+)!Y6!oepEO5{f&mv9?P?OV+MOG&UY{Ehuf~H@nJmJ+mV1+E8=?Fq*E@P5?R8B;FnZ#3Hrh41MGf@g~TIMxRs{ z#3^_OxYPDjOJ#t3LVsfL?L8s7HD4EoJcJ*45RW3`wKLFpPzmj#xfBpW@HZEK9R1{f4x6hB7zq!mj&g@hwHo$-TkF?_5@hybY>UpjewaqI$7`_;$l-i*Zpj#?83btp6*CW>IO@a!2ZIisyT^;esN@(L+ zN;vJq=*u2|dGyyF^6y&USto_G!uq~}cAFrd7_=RJ&vI8m&HY;&_{~`NzgZY%C`qF! 
zN;}fl)J@wAllQ4VAKH~}eqR9;j4fB|%qiRId}mHEA6fMN?&qSw4HP2iIJND(AY=Wxb*J}pQK(%hiABZJZ<=&4Srxwo`%02%Bh{^>6TLZZF}U2aakc-{}vxtM6$5=^nE zVz6JpkW;@D`Q4wn^j-TT6`F_eJCP$4?COyH;M0kb!Q2N{dB6ZV=sT|`M@K=rMgsJY zbpNV5HoR(uzvEn1Kl7VB@m^&PAX6Ebt)WPx4 z2CS+qBx!a_!psD{^@*l4nr6>a!x!_Y`7qH?&VjP+3ED{0_($o^LzrrkSf60W>yM+I ztV~bPh9;=NNHLhkL=i!A%e@${ObPFY0G$Ptd`4n^D=@ zPFa}LYo=Wn?9}?|#NBXuQwz;TH0*Ny&G<1n+flUH3N9IsB9eUvpSuNic%B}YR7VSs zJ$_w?U2(O0M2d3cmtefgw43;qyF0pg&OndmbykVd8f}oXl3^MimsMJ6ZYUu6wt^%^ z4*gA!$NnpQT=K7ZS${4jkZ%6UsU8EswqI=_kve>SS#$np8*ZeHZy=%?^uyBXGPF6# zO=TBm51~~Lq2T>Y$!9B=aTk@s{JTcsxmJmdJSld$bF8y0&vYh&F=u!7tLd40XrlPR z$v{`U$>_PTMw_)(xbh7oS=Cuycnjoni^Zde80(OgV@|!J)R_-$lH2BR?*rqR`rPJK z)VOyvn;`GG%t#%dt%W^jkQf;x!pCW?1m}2jLVDCR8ACCSC!5&=SFhSnB~Idn&c}V0 zz@?|iXw(zDD^rWw&buB-+BUTLzD{H(`U$r#G@6LJALeili{BP8z`R=3LxxGe7*xC; zh~Q|XkLV&e#Ux6kY-I|lMvl8n4LXT{1Y<+3ml&rnbvL|t>L{Mu(WIOuak6xG*#+oZ z=MRk@^KJSIYc&^L;~s`VKC?pBfv6r}$x)^W%yBL&`&0XMr>(PVCmz1hkp7Vf-)u_P zXmz95tNaqkNdC1%60mG3CHy&23mNx9S?_zY_+<#&HxRz`X*!_ZAd+#8Wqi$83s@j3 z?+e>p`-&cuwO(pc&Oe{I{0dp;halcByz^%lt^QJW{uyAxI0LCj8W5{pDH}@Z=m}?ZhDvMk5^IBNROOc-8}gqd37boyZ1c7UC@r-Bmz0MPuP0q z$4kinWxY+5@xwLzRK8}NBIePe%kId+DBcQQk6_JsEO@RM~5P`k{370t}N<kf04$aNcr7d}d2nCT{kExH0XnFn-S)0}Fx@Bm8=@Qz2Y^ZPAFp}u{O|v9 zR?EMf+nW9cO34_6MyE_<$W{Fb7a{*xQo%pJkbn2rrKKwss&UFfBpz>h&ma== zf95XV{o|wbAFt}FV)xGDAs&Er_V-tAZagHpQ(3=3APMlyn^IK+=Nm{f^s%G9Lxihc z9FI%iI+B|Mr))J8`_H z2_TW(e6r2MF~C&SN4+36JlMnB`5G|FljY6_n0)^_=kHIg`CkF%pO|&OS84yNhBFG` ze*lCq?*E;L@QZ#uX!6}Z0%hO9vOj))L`Af%6RW;~sODN%D8G`7qD+gzH-MaI18M-c zmQ+^x88P__l<`-9_fMer&v&z(W%vcq$iJr!2}~A$_{pH?^&d6%gD$=14{(XtP5dD! z;!L`#F?n3aUcQ~FvrCI3CyP;LFcU^So?8#kOR_VuDv-r*54#vX4lFk7iEAfyH$^Ul zp-ef;*KUgHW|y^mfL;Qw?G7_Zh>*^SWt(tFUO&9cBDm&h0uV+$jgubq{KQd@qE*@o zE3(<=kr!s6iIV*W@?cT`5-|g?P{H=f$oUFqT~2N#jP6aACvZfKh>p64yw;3gNX525 zZcD{e{4WheZWPau>$?%8F-KV9BB?-nVuO+KsxmGhZz|8MO@BPE{L)M!NgA~JngC-& zYI`C5p*JN$k*jsDbsxeA>$}jNbKj31T&@}%78*36_=1}^F=jQo+W}*k%uCIA9e0+L zukIJtHhyW9yE8LpbvkCiTxCCRs`Jt3{jf_Y(Pv}1`RWGw0Ui4_6fhk8o)pR7kq9|Z z%F)-F%)moSfMF_F(^FP{V&IxAF^oP(x81X2?fIv#A*X~Pb~9>|b+6K*w!--3>F)(A za79nvBYLvp?2PKx6X8c^Ro+u%{rpbr?6ocpc5%98w8NV&|N4Y?ACVJXKc;8bs38Px zlWQA_5aB8!ZlAqxH)vqA?B`mUg4p_(o7xtl(8+hp;G&&(NIyP*uVeiXDi4lfURfws zXwqp!I*|+EmzekGZe{`Cz>o4-db@tob1f5_*ISU3x`EiWiv@N5x%7k*1IQ&Iz!o!D%iVxx`vIKR3UxCHfw z2q)6UNqis&F;qKpIQ5YNLEqXixcrtfHsa-7)Sc(m&)q{-XXEB_QgaL6?&Gs;I1)n@ zBIf4ekMoo`V}>TRCRPs$t%^J)3=HM^^@9OMwgu6FFl!|MB>Du?sew9qj34FGCFC;ECFnS&5lH$37E zCD6_Gge5{*?Q&8JpLMmmZ*S6Wh?yLZ6s|ZT?bnPT&NN!beCT`&x8u$UReUoB;;m&P zPaa2h6fvCZL`gas-xHE8v}rwx`lP2(sUy;4b+fDUY5&KM7q+TW2ie~A=N*ytR6&8y zEf6!S?>)6UIyA=Z59k&)Itq(k%RtHFZTxv+TAa&zJ-(ocvdW;n`%k<~T=8K^dbf94 z(W;?x+~y7AMmwgn#p_)(gP|NN4?!U0M5-0GC$gm<2YQ(O$TIK;YHbF*T`&u7_liuC ziAC(>v1GkXn3KAt1rstwHtBv2oeA;b_9~tOJ1g6*@973{SqvufMLnvdi1MB$>?nK1 zOTn6W(4ihi}c=OtTUu{NRs+P0Dm;~K)<6Gi7vGh1eRKVVZM-e2rK zBa5?cNAzstq>rdaB6+ByvhgTAZFTt-!OdLVQo5*3T6O?MOBQ93#T1Lff0~wWg|Za` zLd)^Y=x!_xhbhu^Bv$BuD9t(*=>!{)VIQ6j85?EPq!&+Y7Fe2wpqwKUa1Y; zOgPfFmcn3f1KinXwG%vtcQ6h^Mesk=IB;ORseb#;2Y)fnu(#h)jGF2U{{uW;N1qKd z?{%&ZRB2VC2$inizqqtt3h&+;`^Ym#LI2z7$cU6eM3NDSR2g|O26uCb!>Q*c)_C5` zyu2(G@&J|>a=uYdB^-!?_^@Rw7eU$HZtTlsVHS+{Zr%3J7+k@@!wm{MUMBQ(-)*1w z2y$F>q>Y8Pjg#6-UmpZ?)U=u8&g8h&}lG}*o>8&DU| z+tt}f8V(MBX|9Qj&1|0~ar3qSQc%E1mZxa-_X=boGR&3aW7(C&c`ufH!%Dy#t5m#$fl*4F zx}eOa)ItBqRiL)Qp`ZvQ@{MnUP6Y?b?R)e@?NoMjj&1E^TLR;S1FolhPP5wjiH=6c zm44BeVM{kg=ZB|i!UIC>Cu|udn3#3@==ji_Kv4o9@y*70Tc;eEipN>lc#QA;-)B04 zkp0<}iZ@uz=cc6MvXveh62hi}@l<6-zZ0zCAKmT-Q#@>Fl-JEYy4rnl)8kxI8GkGDs+y$r734eENX zFxusW+b+OdAr}!RwZg$s-obzdh)Lh#PO(|NLhU}cx2;}~x5^&wdm2ceV^F9bSa7Qs 
ztb&HV5-wk)9Gv_OG@liBSA<2%vbodptozhyKh_yl0~W}~opq!RWW<+U+#Uil;_Y@v z0SP0Lsr!{j%AFVhg&EgsdH%Uk;py8<^DqW&PJux==HShcwv;>qmJdN5!XK!JMHH$2lz%06f zbqlp<&hk0yTZRm!iG4d7Q~GiJ4!|up1|TG0aUk{7fV`5T_ToEQMC#y1MP^sI*-N&# zQoYecPUv;)F@p2Em55caV`MV#a=|-syRV55!DbfM zK@L{=)rn3|P3ms#I^DKM4t98$DF`B>$W(=+t!YJ;dU{`wN}hndm#g|5$GcUZNjh-# zq`}St#$5ad0v%n9D!AAU9v>-%Gpl8TW9u;^iXZli_X4x)9qo#7*Ah0!sae8@jFh^; z3nA5xR(tn@*`ic?zL64;ODpDT7;N=A1Lad?7o*W1|6Ah3XGXPtPrMzqIUP{cn0^Ak z)6;Q57t>Drz6^x}oAGKI+kib%-{AnfxFH5BIF}NhF;3B~8@EX5IFfcNC`($GV8|GoAkj9aHB-$$x{Dn_vbk>HF?pf!8WCnv{ zEy%R)jW4^sy6@sc$HXWFK4mzBbd#{YE%2$dN-n8+6TL2Cv`~5Q)`;{hj(u3!sJ_ch zFZaFAV?-LyTaG^Rs1!UPIa!iAm8Qbcc#{J;CG_D#k!><8MZl2;Y4dAzyYFpbkxfMb z@9i?2C9T@Xjk;UM!L;0>^?%{8Rb1YOlU`#l-}GWLJFT1ef>Nh>cby?>&%l6*ptIo# zoR8X)h`PmQmOc{;#YgwZusg$rW_*$7*o23ptqj~dd*@OS-GIHj-}X&6Xtp z7iV!a091QK+4a3EMi%A0R@)Q%@nyg&Gu2*}(%l`{jR?UbJTkEY!;Z@kMvyNkfHPm~ z^=4^DM+@im+)>wZ=)oSiGH_xai~7zMS4CUE+-T*ayMjp*)Mc4~l!{j@VuU%|#lGDfkA%LD-IJxA+cY z!$c-HwfqzX5)4w@YX1fj#=ZVZ36su1yZo$t@d*+sewBiDmT^M4Dt&58xda&mj#K_W z=QvfbBj4<+XOLxyiCx9(p`eSA^XQ}EWfn-SU(ZO=?xpe0nXkZw3hKz;?w7)c@?=c( zFzcEd?av#goGXU_j70WpJS5ToIqrA$0z%{md0)c?0E)Ty#bo;5sxaZHef%P{dp!WN zNtAMPA~J{K1%iE zoZMscXT>8ORqRZ0fZ5(aW%EgnJ5^9$ znfj!|vkBLWG%3d+{M_a`97d2qv7DhlNqzx? z=l=$}CNvI)Y|Q}ZaVuaIav3Xq$=V8d3XYTl^drB0AV2QG_xw8G(IO1}@PPoCp~Uso z0w44}p?&S^cK6~8KpFc0=YfB`HqHvfht(pR z@Iuqt7AB#TBwI%uRGpKQfU6x~`uwR9vR8WmGBOuHMlJxz$TKyc@Q}6uF{@un7X7(Q z{7=9n2T-|wE}8z<3enC41X_Qs_3U5k!zci-dIc)x*IJ4GwLYE$i=h9GYlDt-LokLM znB{H!NMxW=CPBnh0RWbcZ!Gt{l1bge^ljFzSm|F5pw-#quC9ZI5RP7f=ZX7{?)40{Y|5ZOcFQoz7N z(m4aKb^Y-q05i;d1IbZkV%j)FTob-rqEDW_taf^rDvinkLc2C%RQ|Q){G)8bzu1rd zckbi==4%r?3|}H?sIN&Bv{@;aZK#t5fz6bO>P4YN8NZGOB0yxLcWR~e;&#RrxtUSG z{~tQ}ZyMsibpT=#1SsYqqBYmcr#8!M69XZT#W`Rd3!ho~AO+AYZ0gPd(O0+c>NEd$ z466U;`hGimX>>GI(=}PCZ;YR=I8e)nj?C;e)OP*F;MZA6^$2z?RYn4t)qs53o&O97 zI{jOx-hZuILJZ)g39!5Pp4t~2Os`sc-uGI}#J?JbfJ$ zBaVhb;){nA56J3g1VcXE0fLJwEUn)_q($xmN(s4#*MpUS3$^=shD8|QLJb%p;JZZj z5PvCh{b^nK7YrA_-81TB&F%2j=7bPC&b-xW*^Tg?C>m*8ut24(Lkr~ z#`T+e$`L{)tKUEb;;SPh`%Q-8Yf(!wFK%lO#%4vC9ZzvYbi}MYBwH~-%S1%O(0%A~ zq?m!#k)^R{4_0)T}GO-dDlqU8z(!B*O?KT>H&u}D3q>gKK z4xBRy_@6oZD(aMhn`c^BJY5uP-~{jCBUY?F3sz5GaS2}QT*Aqm1T3f`c+rr`l_owB z8Zl2iIm-5|rQX!LicuypwP!_W1c!NL=?~r~EuBdHoO!2_skeIHVQsTD7m5f{OjDcY zH4nQPc<0z(=}Ukfp#}OxVqZ6sIk1p!69JMsUo=VaYTw{HEZML;co)8*8mGsr$sw~K z2alf$GWjkm`@2}LcE(Z%{P~5!2SX8WUtT3$(+lc;79*ZW9k4HTeUb~?pa)3c^NNNM zG2A$!_|hmH3vMm#cyc~MKB^g7TWt4008c|Y;{37i=$ufD)I8`jLsr-tU7)M(vjk#r zWSgnk)0b*k;wWVIteIE@l_wrNw3;5Be>`lMT^D)(Q71^}1;_$~d7*vtdlu$PwtEW* z`F7Zq;JOu5)ikE5(CihuN~X#SK~dr75j-{vh^p>%TsU`WJj^Srp}fUavsokZfr_|c z?}~oG?SmHcBN=2WXLQi#d8W93;Zc@Kofsi%R)=_II`xWrgmW5B>~ddHjUJwxoOC*M zZl?x5TqHwn7nDIO8Pnt{QJT@6u&a@$h2ET$f8d;(B&=GSi1d2>;Rp)0)~CP4;{S}v zzYALxq4V5?+!4omn?10xlO$-)YJ$V;c%y^bPuqoK#nBTjYu*yMsldL3#Uay1f7YZS zP{AS0RM*7zUI$}aP#{vN_NrfNk!QGA{fB9b3kCQN&ZDwQzVbjm9ZabLfrfn@y|ajB z{bEFWGuuj%j@H;bLmqo}V84bU_@z$DGisk~K_DWK`tEQuH;|8uXVn5M?O#ZD?~@8$To1Wb@&z^YB-AAJOOV<=X{Fs?XK3$5 zi@LN~BWjj#SY-pywZqtaH_5n12f2w-xv-P^nTvh#2b^jVK-u;fw~ri_kAo{@z& zk1oy{3otG*O2yJsm99IoOy2Y`=JW)}pZq9a?S4Z(0EWkCH~A~Vj~?^sZ`oqq(B>{1 zcyit>bIB|URAwZ+TK^tS@}kL|>E7Mdaj?@L4BP}K({(2{#fcFs-9j;?So^>_zT-`P zgB0r9+^}E$IYq?>bspYL;_|8;4TZ8%Ev1P=nn3RXOM99c-_v7x5*av;#=)bu z1b!7~;hR9;9lw-?OLV&|FMGPn>=kD&nuNw7Q>-BAyT-}rG{10`y)Wf8e+z%oM*yC1 z_-d_bg1fE4dfw%IZ;DWeD~Ou&{xgqIyu*+F9NA=R{(=HbgEfc5o^`tLAW!3V*7B0< zYIOw@>6@<$l4UU~csixdrO@cI?D`pdUN#%AMe=q*j$R?4*%ld)xzbwcdmenpwUy^G zCUy~o>(J(4K7J7Lm?aS6_)$mZkKapOdPLa5Kaxn#PZ8J*gs|&98U(QfDnxJ)y@RKE zP?=g#CrBb@yv=9<)Oh|anUJ10&3omF)AD(6XrMo7P8{mWR(d8yil%6#YZ@Vyd)_!ytR$LQ} 
zLHBA4aAT4J37B4+Njg0Va;J@@R@Q`+0wTX{*yR}=W!wwZ@Zgdq{ z4HrGIJtiLoMGOXqE`ilsZ-e?HT*CH#*H3{d7Y|tJ%w9tqfnETpEkGJCL@$E?L6~H( zj9ouSYg5^A=3TLO&1w+*OV#QbmP!+h+TDgCkZ(~IrH<3;!ERrf<#$o-Fmi{r_2;)e zE4FltRj0M;JgIP+kvBQUaPx{a z(LH#+vEip$WsJv(G5t7dy^++1*!{WJ=3{1>!r6N6c_uI2t+3J)s$Hdt z!5OBC8IC=2G<`N_OZFf(HPs%lcm1l|nSnLCc>Q&%r9H(<#8WxNeVeY|xs{{fP7f94 zbXmxyak2h~mmzj5149BmLt%@ehnMxLi>R@6daO?MRXB^svWLous?43QX^Cm?KNWG+ z2^e-{u+Hh`-!NtlRkv~IZ(Q$YCJgBRwp7K$(F`>(_ zxe0A{KR(dq?T&UYxOjb&Uj<7Vwc@jAXk$n)7qO3Uk&c3!ARfBeR)`q>wVCczuOqgO z9mbSnD=RNW*{X7|gO6a2vR7fYgh!!}{S%WxCn?f_#U^oWS}CyU$`dlOFwnt23{Jp+ zwIyWY0+V?VD2k>h;XDD}bpq!-JW8bsjUPiN^0%k`= zS@_s!o;p;x!y4*N_@)Y%IjH}<5{aQ9e1!_d<;)+=>8S4du_EX7QtRh;&v$m|J~q-_ zQnyf4j)k8`+Yuh>3%>57=BnHmRNq+IvX^p-IMJK@akuctjzL;)nz@>s6{bCjf#Ge6 zd4YOH^GZ^MZ@mh{A+{BV#=Wkmkf_&H_e#iOP5y#winaQpxB5b}K8yj$#>;NxK*Ii_ zUZw*_85}5A^L(lGogN}wHbd%z7j@v)z|v@fE?VL)eY=^{6q@xaw?k0!&AbF}Hxr&L z;Yot{J_mwNrpphgIcTTqFrC{lu@pg~D=pdiJz2{6vM#rc6blF83tX@0-0ro>GvWf4 z2FGY78ZGFM9H>pwjA%=vL*?e0a=OXp+a={U8!hH4`sp&ng?{)5(!C8I=^{xlvw}J3 z2pP`_TP&&6=z9l7IGX6LeoULwm-pI9&s#enQnEm4LxWM&6MUPLQ4{5W#4m=%=BnMR%n{G0j{nF z7fE&{N0W~eEJCC+oW^_5c5HgZb`}xQvHQzo_Y8toxZNpmk4(QB2OJ2AdeOyuyq9#f zmw8|od_a9HMl_dJP!t<~Vu41KiFV2OlK%3}6ZpGqg)?PpdQcPkhS;FAdJU*=4^&Kv zzA(SwV*p2SclH{q$vNh<*Y1oPuwm{?0QppMvI;~zInik$gqP+X9b=SXA>x(m1%>*G zN8^gg;&PFmY4@3{N#r>?i`l|QeOO0p7}(tIlJ1OMr7Ic_^1xX&b?M?q;uMRC0~0lm zj~64-#Zmk%E&-~wY~u`DDa6-x*=(pYL#StnvpIu(z-KVCPAG;`tL6T}d|7leook%= zj(n5lM!0iYMUM;V;|r&Hdn=%doO&d5>;9z%9m^WW28>?^cX3Oq^tMR^J1csX=7`#c zxagJ0bqDG#kb{jlQwcy{ez%$sg1~&cCafFHih9Cz_Q7!s?&VN$bgc$7E#JC31UVLH zc@^(1TXDI_*{*SFXJ`{QxxB*{$VWINngfCrWRZilX<9TjdIn;UN|wW}aVMK4^A?P@ zO@>nxPUcF)Xl2!m*_@QMjM)xgsqkT>EjQ&b?H2GTq@3#mkNH#$hR4hGHla!6XwH7c z%BGMA9+$wkE2-xG^p}s}cV=ESv#qAdnH`JFot`D%nv0;7q2NOX0&TVPaby239| zsR3nQf-R-glH4F&c?atIGHOJaW<-HfUANOX9~n9VMg;$!T@UL@1@IC@W%eZ(Eq9Tv$66^h8bu}7DH z%ucQ}iA-01-9NoR*b+c%6_-W*Bbe6>p=7cD$tozTcAlrO&KdoIR6cQ&-O?S1k5{g(z zfP*XN2RO{cD48P$qOysEW^!lJ{mE|8dg?)KpOLXZTI`2Y>*JFD%G*uIyDS<$k zaaLajE~V?I){d%A8(E5HJO~e1$n>)n;9UZJH^F}2q=hGg{SoSv>+Iuv z@|jK;{TH16y7?*% zC{4L)ChMoMu8t(x7Ec7QB|a{!soOB3Srr%B^1lPJM_eyWZ?!j8d&GIMmt@P7G^let zeq5fXgMA|NzJA9gV91N`m3e|6B|-_b_rPN+4TiF3E+4^!JsLyqFI?$sWAm@f*&*U< z4#-CN9X5)n9d1KQueaUDx`jG8Gq{EeJ1s!NU4CjZpeONSHyGNU3m_RBYQ}51P+#=7erjX~7 zmMe3y%maDZbQsb?lJW7HG^+n3ykWyTLTL{6r?e1m%j6=Kq-3U+`r=_f5~C^ z_kS+*1tu_~k$u4hZ&6;4q|;E75WhwHg%3%5i=5GVpgKv7`l;$7`GP(BGP{+=42J{< za77R2Aq}B*BBpZ(GIG3RcS)|Ep|pvcx5LfJ3!fq{Aipxhi@r&t+$52RhzwXd{->w- zEijbL+VHO9IoEa#hRz|zI}D3tn$)V5cYDmF06nq?@bH=Z9ZL7d-!vywC)Pj2D@AP* zHU}g3*;d|9!`r|A7HJV}n4h{n6XVa^jemUCJai)$ReYsfu-IXR%O@GP_Ba^ho{Wk? 
zKtUa%eF#rItz$N>j4aA#;CfB{mt)x$e21 zsIRZVogyqmpolVD%Z%9;?4#+eW!kLM-Sdae*(3VGVIrE; zXVwj5mvdW0rDd>;%XZQIi*F#i0ucP{GC>pzvdH-jBu|swvwW;^$^Q-HIC90D7)=T3 zQ$O0e>Ja^0sVC(74Md~dk#Po4naBGqZ_Gr(##7dm{Li!rv7vygHY5E?l#V{3Gd}cw zLu3+5`$sY?uWiGS&`aZNPq|y0Q+kh14vy~!WAxE}3^-DnBsxJlf8%XBg8@4{XY>PV ztKQ-|QC?x{X}AGm3@fGR0aQ=_9oVs+lqa{y@O`R@Z&iS@v+rbezF!fin8BwGbZZ`X$*FHDy(pru#2hiu=?(;B`EJ(;o%JM zc*~6gFX}3DJ`hrz_ifNl>#DP-^uDphoY%&Ai}Acq&awNJ!`o=y!G$Ql%{})K{PA5s zy0Pjz4BRQ(XL>^c_M<_(se_S>jQIVR{rM;{EnfoQgmM$EfI@+`mX`Zi*Uh9kzrg_U zWxaeq5ePvTD46U-3OOttjZjsOWA+^+C%8 zQg8U)H1~08Ok;5kr?D01s^gS-L)QF=Mk3Ypi30TEim3>$6p$^w;=RuyC^J3XpFc80AmG+jk6!*}w1=9&6Dm5x zKtxQbw<CIvNfp_I~h_&x|Yz_uiElIZYn`Ly8xZy27>S!SGf+7?;E~nBz;+Va(3S0 zaK#->bbR%7W)y$b_|7HodqmKM^aZD0{g~g#sRQTU_0toN^92vvWXh&;82_#qMZikg zOMo>JvQxPOnHRM^v7=ZXI0i_Wp3Ck&+oD_P>``AvE`P`paBsmoS}Fpt@$ogx-o*QZkip4t zC_8sXc;m)^HLsek6@igPhWU$-NRBR(h#LcMiG@a%68L%)p4v<{D`VDCkWOU=km#CC zu;K8H61ba(ek?!+(h))U-Iq9x0iQ80XlzHYGRi<{K6lwDr9BF8Q$9l2mkGm1)N@chDvR zynUxF*6Bkp%4I+L@HqOe;bCNHX4O$t1^0lh)Wp^cGJ^9>bxafg=S=~Zh%(CLX>Z7j zBSnsV`7)fhS^Q|u7+&}30-a7eJ2I5zK(qA-GdvO88n}5Rgo`j{J9bAcC+A6dB%O$Q zH+DADOQy6rzIC_K>^2d0FCl!MiZ$;H==O~HRB2dq9LKY}%q^pPLydL~J;KWTbOa+E z#<(@nn0j+v#kzS(ZutwZRyG+>Fkh+hm{vp297#9%*U5sk9sG!lx>m3=bq$#3UwCAz z#ZV?P&>Tmw+{Uf5oR^xm&vB`ye0`(mAE-M`z!c9`^7JoBK5>>AH$A}q96%heSGdbd zGe5$O4fE3|0@BrjKb(eAXg%&_#*NlY)DN1AdlD-};_Jdn-v96j1tuOh|jlHBIF7arJXW|Uy>SA5K~s_pBa zxjTKR$MO>emx*_zeV2-ui*Sz`lpj_fzOZZM$|hoDuZy~4=c5M5li{iZ^Pce)ePDEO z>+u7tj8g#L&BYx>i}d6*gkT}!2GmZ33WTP@_R=;KaPC!ZsBDyrJmEk0=i@8WM6{N+ zB&}9Y*sULLP0_GFB<&!gCe7<$0GYm4yQ}$(eS6D@cucgmnR?BztWr^Qj~BkasvJ|7 zGI?37d@xtecV02fsn>E3m_WVP_Ngzl9H5^FLLbf_+e4}#>gcv(9zZakZo_s8YX!mI@df>bsjRbyk!TiwDddZh}bFtJx2>{ zAqC)g%J&0>Kn(V(YN*;9!q$82)By9HJIMi9kDlFvGonA7b{?VL@w_bLdJ0JLHn1_a zJZ^5S_6q8sYjua7D4x|tz{r`x5X1l*we#N&61Pv#4!ycjLI$`~L{Wmu7d*sOTc3 zb&@w~sjW^vVqy54FuY`M6EPOMtEB$>z+HEMK+om)POP_6W`8vzV>t^ zKNmzni2#^l%D41(d6MGvcR0`2i?fU-&z%)<^6)ih1PNIZ0lTMJgcZy?dQVZy0e6Wj zFO1VQ7)?(oyk*6q&sa}GO4N+s z(${vBKcEWf-482YzV(F(a%*v}$Bk^hxXuy!CjJ35xN(H!adG>tXLNKfAkF}Pj!C4V zUpRp!(IngMEo?`omAVISuF~8{Q6I;q&8hVI)2fELKFh@J-h(};A5bRR0UCGK5#X!t z!;~g~t~Ql(+^=8%h$yX68mum(OQH~h6irQTGvO)^6;&N)K6K42wBT+wzcosQT|jhP z{^ET?_ju+JurWbiwJ)Eq3)9SCQyCjb^lj>371610gr{@N>b3%|7#q5lZZ7JF4^ zbDB}*m9Bh96YlV~4)V25Ve84R-lPf|KAgH;uPj2esjBUBX1bo=lqXkW-IS(^|N1^uCD2Jjvscl=c0EO8!5%B8DY+NL`*8?Rup(KyGHI zd*woIX$Fv+3ARG%>8wu0&`Zi zWu#oH)i>7#c{ty7e2`CTPU17{vFz5Wy}pmYRe>w$8IUmP&3EO4w>SEYz_-`#g$44Z zf>2CNBAg7n0BIGNFwSnL5lQn{aXQ~t!?8P8S0eky09ihb=E!5=qNMl^5z$c}_MACS z>jP}B{M;QwNm3^8$2GS^;Xuz9rUk?DcGNMP>AqNiV&;%`2ax#H?83}LK@~P5H?iiL zP)n4?TWHF9LtZHORL&8DG1XmYCU0J^-Nfcj!uxYa{l_t#Z&_-nku*$!g~NfhH9c}F zeB{LgxL@q4q}$75=wNL;M_9~ zOPx1@F}E9?^9QasVfBZ-+dI`NX#5tdx2iKx4G@69wv|O?V`wQ@l_<_@>~-%25?>$4 z?YUl9f77;CfOYLPI>7g(IPgP`{#-E0xlO1X!+DxaO^0VSOq^<^)wZ%_G9US(h9 zvoyEb0uGR;?y>L=EA|G5&@gVaM{TYZ4$9Tfwa;val~=oOYHA-8h)L zt{nO@nFx);U-cNV#xKj6&CYRrJ-8KqLCv+o$8U_Z_z}y=la{ zZXZ03l4f>pehVgAbU(u%^{i`)+eybQKN|KQZL1}`S8o$&f>}sbFsR_fW0dwgCC-#y`w#6Rhl*OZL}I!C98s8qd%{FzSc^-D~!0IQR5hQiR&7UD$OM@ zkMOcaRi-8vzAHACd2}6VmAydTXLC>OGV(yyit=U#X`*>7ZvoAbKil>rg)#Ac!jLc->BYky_hH{FB)aSzsbV>mG+^5{< zx7)a|m}_pWs*a9k8iqFc0MqP`2HWt}dga0Kne8`_&S8Xu zFq2*wXsv}5!K8TvIgLI|tv$Y~+FLT7o0slM*s_PV0MOl5W;dsF2jr%8%iZg->xslA z<67U8feS$y#BuZfLR}B#CFf<>eJ}1}{4b^Hnj^YT z#y@BQqPd^%aIP7)l%On8O%)$hZ=47ro=WPH(zid9%rPVjItzA54(Wj4db((*=pEhZ ze}~uUKD?rY5^|UDP^0Ss77?K>9O_Ru`3ZPjAC59Q9mow);0op2JooBdUkb-%J`47E zAWfafZ=?{1X)ovngwf?kdZ@9<13CSa9As-nm*2Q_~xmD5o7brnzFswHlCM z3gVInKz{2HsSlp(=?lY4qluOyZ<*sF2sDJr-19uUCpu^JOdVF`gKJxB>rYyoK?x}8 
z_rozuBjowod@v){pO7p8=>t{j6D_QsdHuo|8g(_5)vg#W{Fdwtij_>V6EwBkSo`M2>=^MNgv{P~rG1u!g^2%>h}RGr@QHGvrqu{i>6Bw{YS9BZ~9 VT9o1hddB}ERp19|f#|on{|9>%vPS>_ literal 0 HcmV?d00001 From b1777f0ca473f2a21fb8829a2958311379b1ee9d Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:06:29 +0800 Subject: [PATCH 24/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 8a86002628..e2f9ae7ce2 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,14 +1,12 @@ # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search ## Introduction -One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training -of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training -process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized -paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising -one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the -convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned -settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. -For more details, please refer to the paper (coming soon). +One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. 
The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon). + +

    + Ocean
    + +
    ## Reproduction Results Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. @@ -22,6 +20,14 @@ Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior t | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | +Top-1 Accuracy on ImageNet reported in the paper, surpassing MobileNetV3 and EfficientNet-B0/B1. + + + +
    drawingdrawing
    + + ## Requirements * python >= 3.6 * torch >= 1.2 From 4fcbaa96a73d1099a22f41dcec42fd4fcc868606 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:13:43 +0800 Subject: [PATCH 25/62] Update CDARTS.md --- docs/en_US/NAS/CDARTS.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/docs/en_US/NAS/CDARTS.md b/docs/en_US/NAS/CDARTS.md index 8a1f9d4d9f..efceb8f57d 100644 --- a/docs/en_US/NAS/CDARTS.md +++ b/docs/en_US/NAS/CDARTS.md @@ -43,15 +43,10 @@ bash run_retrain_cifar.sh ### PyTorch ```eval_rst -.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer +.. autoclass:: nni.nas.pytorch.cream.CreamSupernetTrainer :members: -.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator - :members: - -.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator - :members: - -.. autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel +.. autoclass:: nni.nas.pytorch.cdarts.CreamSupernetTrainingMutator :members: + ``` From 2205433175a8b9474f6190d9e6c577e2afe70426 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:18:49 +0800 Subject: [PATCH 26/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index e2f9ae7ce2..29d7191e63 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -106,3 +106,12 @@ sh ./test.sh ``` The test result will be saved in `./retrain`. You can configure the `--output` in `./test.sh` to specify a path for it. + +```eval_rst +.. autoclass:: nni.nas.pytorch.cream.CreamSupernetTrainer + :members: + +.. autoclass:: nni.nas.pytorch.cdarts.CreamSupernetTrainingMutator + :members: + +``` From b277b96e4dd4856f1ab024a286e43add82cd5212 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:21:34 +0800 Subject: [PATCH 27/62] Update CDARTS.md --- docs/en_US/NAS/CDARTS.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/docs/en_US/NAS/CDARTS.md b/docs/en_US/NAS/CDARTS.md index efceb8f57d..c8cee29d48 100644 --- a/docs/en_US/NAS/CDARTS.md +++ b/docs/en_US/NAS/CDARTS.md @@ -4,7 +4,7 @@ CDARTS builds a cyclic feedback mechanism between the search and evaluation networks. First, the search network generates an initial topology for evaluation, so that the weights of the evaluation network can be optimized. Second, the architecture topology in the search network is further optimized by the label supervision in classification, as well as the regularization from the evaluation network through feature distillation. Repeating the above cycle results in a joint optimization of the search and evaluation networks, and thus enables the evolution of the topology to fit the final evaluation network. -In implementation of `CdartsTrainer`, it first instantiates two models and two mutators (one for each). The first model is the so-called "search network", which is mutated with a `RegularizedDartsMutator` -- a mutator with subtle differences with `DartsMutator`. The second model is the "evaluation network", which is mutated with a discrete mutator that leverages the previous search network mutator, to sample a single path each time. Trainers train models and mutators alternatively. Users can refer to [references](#reference) if they are interested in more details on these trainers and mutators. 
+In implementation of `CdartsTrainer`, it first instantiates two models and two mutators (one for each). The first model is the so-called "search network", which is mutated with a `RegularizedDartsMutator` -- a mutator with subtle differences with `DartsMutator`. The second model is the "evaluation network", which is mutated with a discrete mutator that leverages the previous search network mutator, to sample a single path each time. Trainers train models and mutators alternatively. Users can refer to [paper](https://arxiv.org/pdf/2006.10724.pdf) if they are interested in more details on these trainers and mutators. ## Reproduction Results @@ -43,10 +43,15 @@ bash run_retrain_cifar.sh ### PyTorch ```eval_rst -.. autoclass:: nni.nas.pytorch.cream.CreamSupernetTrainer +.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer :members: -.. autoclass:: nni.nas.pytorch.cdarts.CreamSupernetTrainingMutator +.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator + :members: + +.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator + :members: + +.. autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel :members: - ``` From 8d413bfaadf9cb85c23e82485bdbce5fee75575a Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:21:59 +0800 Subject: [PATCH 28/62] Update CDARTS.md --- docs/en_US/NAS/CDARTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/CDARTS.md b/docs/en_US/NAS/CDARTS.md index c8cee29d48..19d2146cf3 100644 --- a/docs/en_US/NAS/CDARTS.md +++ b/docs/en_US/NAS/CDARTS.md @@ -2,7 +2,7 @@ ## Introduction -CDARTS builds a cyclic feedback mechanism between the search and evaluation networks. First, the search network generates an initial topology for evaluation, so that the weights of the evaluation network can be optimized. Second, the architecture topology in the search network is further optimized by the label supervision in classification, as well as the regularization from the evaluation network through feature distillation. Repeating the above cycle results in a joint optimization of the search and evaluation networks, and thus enables the evolution of the topology to fit the final evaluation network. +[CDARTS](https://arxiv.org/pdf/2006.10724.pdf) builds a cyclic feedback mechanism between the search and evaluation networks. First, the search network generates an initial topology for evaluation, so that the weights of the evaluation network can be optimized. Second, the architecture topology in the search network is further optimized by the label supervision in classification, as well as the regularization from the evaluation network through feature distillation. Repeating the above cycle results in a joint optimization of the search and evaluation networks, and thus enables the evolution of the topology to fit the final evaluation network. In implementation of `CdartsTrainer`, it first instantiates two models and two mutators (one for each). The first model is the so-called "search network", which is mutated with a `RegularizedDartsMutator` -- a mutator with subtle differences with `DartsMutator`. The second model is the "evaluation network", which is mutated with a discrete mutator that leverages the previous search network mutator, to sample a single path each time. Trainers train models and mutators alternatively. Users can refer to [paper](https://arxiv.org/pdf/2006.10724.pdf) if they are interested in more details on these trainers and mutators. 
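The cyclic feedback described above is easier to see in code. Below is a minimal, self-contained sketch of one search/evaluation cycle in plain PyTorch; it is not the `CdartsTrainer` implementation, and `search_net`, `eval_net`, `alpha`, and `lambda_distill` are toy stand-ins chosen only to make the alternation concrete.

```python
# Toy sketch of one CDARTS-style cycle (illustrative only; not the CdartsTrainer API).
import torch
import torch.nn as nn
import torch.nn.functional as F

search_net = nn.Linear(16, 10)                  # stand-in for the weight-sharing search network
eval_net = nn.Linear(16, 10)                    # stand-in for the discretized evaluation network
alpha = torch.zeros(10, requires_grad=True)     # stand-in for architecture parameters

w_opt = torch.optim.SGD(list(search_net.parameters()) + list(eval_net.parameters()), lr=0.01)
a_opt = torch.optim.Adam([alpha], lr=3e-4)
lambda_distill = 0.5                            # weight of the distillation term (assumed value)

x, y = torch.randn(8, 16), torch.randint(0, 10, (8,))

# Step 1: optimize the weights of both networks with an ordinary classification loss.
w_opt.zero_grad()
(F.cross_entropy(search_net(x), y) + F.cross_entropy(eval_net(x), y)).backward()
w_opt.step()

# Step 2: optimize the architecture parameters with label supervision plus a
# distillation term that pulls the search network toward the evaluation
# network's soft predictions -- the "cyclic feedback" between the two networks.
a_opt.zero_grad()
search_logits = search_net(x) + alpha           # toy coupling of alpha to the search output
with torch.no_grad():
    eval_probs = F.softmax(eval_net(x), dim=1)
arch_loss = F.cross_entropy(search_logits, y) + lambda_distill * F.kl_div(
    F.log_softmax(search_logits, dim=1), eval_probs, reduction="batchmean")
arch_loss.backward()
a_opt.step()
```

The essential ordering is what matters: network weights are updated under ordinary label supervision first, then the architecture parameters are updated with label supervision plus a distillation term computed from the evaluation network's predictions, and the cycle repeats.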
From cc9f336935463093df378e421281e441d8b80c14 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Thu, 3 Sep 2020 22:06:21 +0800 Subject: [PATCH 29/62] Update distributed_train.sh --- examples/nas/cream/distributed_train.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nas/cream/distributed_train.sh b/examples/nas/cream/distributed_train.sh index 6547b08537..cdd24647b5 100755 --- a/examples/nas/cream/distributed_train.sh +++ b/examples/nas/cream/distributed_train.sh @@ -1,4 +1,4 @@ #!/bin/bash NUM_PROC=$1 shift -python -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/supernet.py "$@" +python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/supernet.py "$@" From 94944932cad6f1c91bf7e51c46194416b625f6b6 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Thu, 3 Sep 2020 22:06:31 +0800 Subject: [PATCH 30/62] Update distributed_test.sh --- examples/nas/cream/distributed_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nas/cream/distributed_test.sh b/examples/nas/cream/distributed_test.sh index 50a2ad7bb6..dbd5d83459 100755 --- a/examples/nas/cream/distributed_test.sh +++ b/examples/nas/cream/distributed_test.sh @@ -1,4 +1,4 @@ #!/bin/bash NUM_PROC=$1 shift -python -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/test.py "$@" +python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/test.py "$@" From 6a332ff197c1775a58e509d51d148d61ee784400 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Thu, 3 Sep 2020 22:17:12 +0800 Subject: [PATCH 31/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 29d7191e63..8603f9deec 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -4,24 +4,23 @@ One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. 
Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon).
    - Ocean
    + Cream
    ## Reproduction Results -Top-1 Accuracy on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus. +Top-1 Accuracy on ImageNet. The top-1 accuracy of Cream search algorithm surpasses MobileNetV3 and EfficientNet-B0/B1 on ImageNet. +The training with 16 Gpus is a little bit superior than 8 Gpus, as below. -| Model (M Flops) | NNI (8Gpus) | Paper (16Gpus) | +| Model (M Flops) | 8Gpus | 16Gpus | | ---- |:-------------:| :-----:| -| 14M | testing | 59.6 | +| 14M | 59.3 | 59.6 | | 42M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 285M | 76.7 | 77.6 | | 470M | 78.9 | 79.2 | | 600M | 79.4 | 80.0 | -Top-1 Accuracy on ImageNet reported in the paper, supassing MobileNetV3 and EfficientNet-B0/B1. - @@ -69,7 +68,7 @@ To search for an architecture, you need to configure the parameters `flops_minim --flops_maximum 600 # Maximum Flops of Architecture ``` -For example, if you expect to search an architecture with model Flops <= 200M, please set the `flops_minimum` and `flops_maximum` to be `0` and `200`. +For example, if you expect to search an architecture with model flops <= 200M, please set the `flops_minimum` and `flops_maximum` to be `0` and `200`. After you specify the flops of the architectures you would like to search, you can search an architecture now by running: @@ -77,7 +76,7 @@ After you specify the flops of the architectures you would like to search, you c sh ./run.sh ``` -Searched model needs to be retrained to obtain the final model. Retraining code will be released soon. +The searched architectures need to be retrained and obtain the final model. The final model is saved in `.pth.tar` format. Retraining code will be released soon. ### II. Test From b35ccacac50f115a2e51752ba95e9abfc3e1aebd Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 27 Sep 2020 12:02:39 +0900 Subject: [PATCH 32/62] init --- examples/nas/cream/supernet.py | 4 +- examples/nas/cream/utils/flops_table.py | 4 +- .../pynni/nni/nas/pytorch/cream/__init__.py | 1 - .../pynni/nni/nas/pytorch/cream/trainer.py | 75 +++++++++++-------- 4 files changed, 47 insertions(+), 37 deletions(-) mode change 100755 => 100644 examples/nas/cream/supernet.py mode change 100755 => 100644 examples/nas/cream/utils/flops_table.py mode change 100755 => 100644 src/sdk/pynni/nni/nas/pytorch/cream/trainer.py diff --git a/examples/nas/cream/supernet.py b/examples/nas/cream/supernet.py old mode 100755 new mode 100644 index 47c5294289..f55065e9af --- a/examples/nas/cream/supernet.py +++ b/examples/nas/cream/supernet.py @@ -27,7 +27,7 @@ from torch.utils.tensorboard import SummaryWriter from nni.nas.pytorch.cream import CreamSupernetTrainer -from nni.nas.pytorch.cream import CreamSupernetTrainingMutator +from nni.nas.pytorch.random import RandomMutator logger = logging.getLogger("nni.cream.supernet") @@ -372,7 +372,7 @@ def main(): criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda() val_loss = nn.CrossEntropyLoss().cuda() - mutator = CreamSupernetTrainingMutator(model, args.how_to_prob, args.pre_prob, choice_num, sta_num) + mutator = RandomMutator(model) trainer = CreamSupernetTrainer(model, criterion, optimizer, args.epochs, train_loader=loader_train, valid_loader=loader_eval, diff --git a/examples/nas/cream/utils/flops_table.py b/examples/nas/cream/utils/flops_table.py old mode 100755 new mode 100644 index b4d60d849e..0cc597c6f3 --- a/examples/nas/cream/utils/flops_table.py +++ b/examples/nas/cream/utils/flops_table.py @@ -2,7 +2,7 @@ from ptflops import get_model_complexity_info class LatencyEst(object): - def 
__init__(self, model, input_shape=(1, 3, 224, 224), device='cpu'): + def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'): self.block_num = len(model.blocks) self.choice_num = len(model.blocks[0]) self.latency_dict = {} @@ -17,7 +17,7 @@ def __init__(self, model, input_shape=(1, 3, 224, 224), device='cpu'): self.params_fixed = 0 self.flops_fixed = 0 - input = torch.randn((2, 3, 224, 224)) + input = torch.randn(input_shape) flops, params = get_model_complexity_info(model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False) self.params_fixed += params / 1e6 diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py index 2429e996f3..43a038b467 100755 --- a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py @@ -2,4 +2,3 @@ # Licensed under the MIT license. from .trainer import CreamSupernetTrainer -from .mutator import CreamSupernetTrainingMutator diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py old mode 100755 new mode 100644 index b6a28340d4..a245d91b44 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -22,31 +22,42 @@ class CreamSupernetTrainer(Trainer): ---------- model : nn.Module Model with mutables. - mutator : Mutator - A mutator object that has been initialized with the model. loss : callable Called with logits and targets. Returns a loss tensor. - metrics : callable - Returns a dict that maps metrics keys to metrics data. optimizer : Optimizer Optimizer that optimizes the model. num_epochs : int Number of epochs of training. - train_loader : iterable + train_loader : iterablez Data loader of training. Raise ``StopIteration`` when one epoch is exhausted. - dataset_valid : iterable + valid_loader : iterablez Data loader of validation. Raise ``StopIteration`` when one epoch is exhausted. + mutator : Mutator + A mutator object that has been initialized with the model. batch_size : int Batch size. - workers: int - Number of threads for data preprocessing. Not used for this trainer. Maybe removed in future. - device : torch.device - Device object. Either ``torch.device("cuda")`` or ``torch.device("cpu")``. When ``None``, trainer will - automatic detects GPU and selects GPU first. log_frequency : int Number of mini-batches to log metrics. - callbacks : list of Callback - Callbacks to plug into the trainer. See Callbacks. 
+ est : object + look-up table of flops and parameters + meta_sta_epoch : int + starting epoch of using meta picking + update_iter : int + interval of updating meta networks + slices : int + batch size of mini slices + pool_size : int + board size + pick_method : basestring + how to pick teacher network + lr_scheduler : scheduler + Learning rate scheduler + distributed : bool + whether to use distributed training + local_rank : int + index of current rank + val_loss : callable + calculate validation loss """ def __init__(self, model, loss, @@ -130,14 +141,14 @@ def get_model(model): elif self.pick_method == 'meta': meta_value, cand_idx, cand = -1000000000, -1, None for now_idx, item in enumerate(self.best_children_pool): - inputx = item[3] + inputx = item['input'] output = F.softmax(self.model(inputx), dim=1) - weight = get_model(self.model).forward_meta(output - item[4]) + weight = get_model(self.model).forward_meta(output - item['feature_map']) if weight > meta_value: meta_value = weight # deepcopy(torch.nn.functional.sigmoid(weight)) cand_idx = now_idx - cand = self.arch_dict[(self.best_children_pool[cand_idx][0], - self.best_children_pool[cand_idx][2])] + cand = self.arch_dict[(self.best_children_pool[cand_idx]['acc'], + self.best_children_pool[cand_idx]['arch_list'])] assert cand is not None meta_value = torch.nn.functional.sigmoid(-weight) else: @@ -173,7 +184,7 @@ def raw_sgd(w, g): for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): del weight.grad - held_out_x = input_data[slice_ind:slice_ind * 2].clone() + held_out_x = deepcopy(input_data[slice_ind:slice_ind * 2].clone().detach()) output_2 = self.model(held_out_x) valid_loss = self.loss(output_2, target[slice_ind:slice_ind * 2]) self.optimizer.zero_grad() @@ -215,35 +226,34 @@ def raw_sgd(w, g): elif self.pick_method == 'meta': meta_value, cand_idx, cand = -1000000000, -1, None for now_idx, item in enumerate(self.best_children_pool): - inputx = item[3] + inputx = item['input'] output = F.softmax(self.model(inputx), dim=1) - weight = get_model(self.model).forward_meta(output - item[4]) + weight = get_model(self.model).forward_meta(output - item['feature_map']) if weight > meta_value: meta_value = weight cand_idx = now_idx - cand = self.arch_dict[(self.best_children_pool[cand_idx][0], - self.best_children_pool[cand_idx][2])] + cand = self.arch_dict[(self.best_children_pool[cand_idx]['acc'], + self.best_children_pool[cand_idx]['arch_list'])] assert cand is not None meta_value = torch.nn.functional.sigmoid(-weight) else: raise ValueError('Method Not supported') - if not self.best_children_pool: - output = self.model(input) + output = self.model(input_data) loss = self.loss(output, target) kd_loss = loss elif epoch <= self.meta_sta_epoch: - output = self.model(input) + output = self.model(input_data) loss = self.loss(output, target) else: - output = self.model(input) + output = self.model(input_data) with torch.no_grad(): # save student arch saved_cache = self.mutator._cache self.mutator._cache = cand # forward - teacher_output = self.model(input).detach() + teacher_output = self.model(input_data).detach() # restore student arch self.mutator._cache = saved_cache @@ -262,8 +272,8 @@ def raw_sgd(w, g): meters.update(metrics) if epoch > self.meta_sta_epoch and ( - (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1][1] + 5) or - (prec1 > self.best_children_pool[-1][1] and cand_flops < self.best_children_pool[-1][2])): + (len(self.best_children_pool) < 
self.pool_size) or (prec1 > self.best_children_pool[-1]['acc'] + 5) or + (prec1 > self.best_children_pool[-1]['acc'] and cand_flops < self.best_children_pool[-1]['flops'])): val_prec1 = prec1 training_data = deepcopy(input_data[:self.slices].detach()) if not self.best_children_pool: @@ -271,12 +281,13 @@ def raw_sgd(w, g): else: features = deepcopy(teacher_output[:self.slices].detach()) self.best_children_pool.append( - (val_prec1, prec1, cand_flops, training_data, F.softmax(features, dim=1))) + {'acc': val_prec1, 'accu': prec1, 'flops': cand_flops, 'input': training_data, + 'feature_map': F.softmax(features, dim=1)}) self.arch_dict[(val_prec1, cand_flops)] = self.mutator._cache - self.best_children_pool = sorted(self.best_children_pool, reverse=True) + self.best_children_pool = sorted(self.best_children_pool, key=lambda x: x['acc'], reverse=True) if len(self.best_children_pool) > self.pool_size: - self.best_children_pool = sorted(self.best_children_pool, reverse=True) + self.best_children_pool = sorted(self.best_children_pool, key=lambda x: x['acc'], reverse=True) del self.best_children_pool[-1] if self.lr_scheduler is not None: From 928961407338e52a74a1c20381e61014cc7f7906 Mon Sep 17 00:00:00 2001 From: lzuqer Date: Sun, 27 Sep 2020 11:24:40 +0800 Subject: [PATCH 33/62] Update supernet.py --- examples/nas/cream/supernet.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/nas/cream/supernet.py b/examples/nas/cream/supernet.py index f55065e9af..87d26ae910 100644 --- a/examples/nas/cream/supernet.py +++ b/examples/nas/cream/supernet.py @@ -16,15 +16,15 @@ from torch.nn.parallel import DistributedDataParallel as DDP has_apex = False -from dataset import Dataset, create_loader, resolve_data_config +from timm.data import Dataset, create_loader, resolve_data_config, FastCollateMixup, DatasetTar +from timm.models import create_model, resume_checkpoint + +from timm.utils import * +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from timm.scheduler import create_scheduler + from models.hypernet import _gen_supernet -from utils.flops_table import LatencyEst -from utils.helpers import * -from utils.EMA import ModelEma -from utils.saver import CheckpointSaver -from utils.loss import LabelSmoothingCrossEntropy -from utils.scheduler import create_scheduler -from torch.utils.tensorboard import SummaryWriter +from flops_table import LatencyEst from nni.nas.pytorch.cream import CreamSupernetTrainer from nni.nas.pytorch.random import RandomMutator From ab9d398ab6e4709568798968d651438f1feae7ba Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 27 Sep 2020 12:26:27 +0900 Subject: [PATCH 34/62] 1)remove timm --- examples/nas/cream/dataset/__init__.py | 3 - examples/nas/cream/dataset/auto_augment.py | 789 ------------------ examples/nas/cream/dataset/base_dataset.py | 197 ----- examples/nas/cream/dataset/loader.py | 241 ------ examples/nas/cream/dataset/processing.py | 204 ----- examples/nas/cream/dataset/tiny_imagenet.py | 166 ---- examples/nas/cream/dataset/transform.py | 182 ---- examples/nas/cream/dataset/utils.py | 303 ------- examples/nas/cream/{utils => }/flops_table.py | 0 examples/nas/cream/models/builder.py | 8 +- examples/nas/cream/models/hbuilder.py | 6 +- examples/nas/cream/models/units.py | 355 -------- examples/nas/cream/models/utils.py | 123 --- examples/nas/cream/utils/EMA.py | 66 -- examples/nas/cream/utils/__init__.py | 0 examples/nas/cream/utils/helpers.py | 169 ---- examples/nas/cream/utils/loss.py | 31 - 
examples/nas/cream/utils/optimizer.py | 162 ---- examples/nas/cream/utils/saver.py | 140 ---- examples/nas/cream/utils/scheduler.py | 309 ------- 20 files changed, 7 insertions(+), 3447 deletions(-) delete mode 100755 examples/nas/cream/dataset/__init__.py delete mode 100755 examples/nas/cream/dataset/auto_augment.py delete mode 100755 examples/nas/cream/dataset/base_dataset.py delete mode 100755 examples/nas/cream/dataset/loader.py delete mode 100755 examples/nas/cream/dataset/processing.py delete mode 100755 examples/nas/cream/dataset/tiny_imagenet.py delete mode 100755 examples/nas/cream/dataset/transform.py delete mode 100755 examples/nas/cream/dataset/utils.py rename examples/nas/cream/{utils => }/flops_table.py (100%) delete mode 100755 examples/nas/cream/models/units.py delete mode 100755 examples/nas/cream/models/utils.py delete mode 100755 examples/nas/cream/utils/EMA.py delete mode 100755 examples/nas/cream/utils/__init__.py delete mode 100755 examples/nas/cream/utils/helpers.py delete mode 100755 examples/nas/cream/utils/loss.py delete mode 100755 examples/nas/cream/utils/optimizer.py delete mode 100755 examples/nas/cream/utils/saver.py delete mode 100755 examples/nas/cream/utils/scheduler.py diff --git a/examples/nas/cream/dataset/__init__.py b/examples/nas/cream/dataset/__init__.py deleted file mode 100755 index 14620b5d4d..0000000000 --- a/examples/nas/cream/dataset/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from dataset.loader import create_loader -from dataset.base_dataset import Dataset, AugMixDataset -from dataset.utils import resolve_data_config \ No newline at end of file diff --git a/examples/nas/cream/dataset/auto_augment.py b/examples/nas/cream/dataset/auto_augment.py deleted file mode 100755 index ce8c1f02b5..0000000000 --- a/examples/nas/cream/dataset/auto_augment.py +++ /dev/null @@ -1,789 +0,0 @@ -import random -import math -import re -from PIL import Image, ImageOps, ImageEnhance, ImageChops -import PIL -import numpy as np - - -_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) - -_FILL = (128, 128, 128) - -# This signifies the max integer that the controller RNN could predict for the -# augmentation scheme. -_MAX_LEVEL = 10. 
- -_HPARAMS_DEFAULT = dict( - translate_const=250, - img_mean=_FILL, -) - -_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) - - -def _interpolation(kwargs): - interpolation = kwargs.pop('resample', Image.BILINEAR) - if isinstance(interpolation, (list, tuple)): - return random.choice(interpolation) - else: - return interpolation - - -def _check_args_tf(kwargs): - if 'fillcolor' in kwargs and _PIL_VER < (5, 0): - kwargs.pop('fillcolor') - kwargs['resample'] = _interpolation(kwargs) - - -def shear_x(img, factor, **kwargs): - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) - - -def shear_y(img, factor, **kwargs): - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) - - -def translate_x_rel(img, pct, **kwargs): - pixels = pct * img.size[0] - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) - - -def translate_y_rel(img, pct, **kwargs): - pixels = pct * img.size[1] - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) - - -def translate_x_abs(img, pixels, **kwargs): - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) - - -def translate_y_abs(img, pixels, **kwargs): - _check_args_tf(kwargs) - return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) - - -def rotate(img, degrees, **kwargs): - _check_args_tf(kwargs) - if _PIL_VER >= (5, 2): - return img.rotate(degrees, **kwargs) - elif _PIL_VER >= (5, 0): - w, h = img.size - post_trans = (0, 0) - rotn_center = (w / 2.0, h / 2.0) - angle = -math.radians(degrees) - matrix = [ - round(math.cos(angle), 15), - round(math.sin(angle), 15), - 0.0, - round(-math.sin(angle), 15), - round(math.cos(angle), 15), - 0.0, - ] - - def transform(x, y, matrix): - (a, b, c, d, e, f) = matrix - return a * x + b * y + c, d * x + e * y + f - - matrix[2], matrix[5] = transform( - -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix - ) - matrix[2] += rotn_center[0] - matrix[5] += rotn_center[1] - return img.transform(img.size, Image.AFFINE, matrix, **kwargs) - else: - return img.rotate(degrees, resample=kwargs['resample']) - - -def auto_contrast(img, **__): - return ImageOps.autocontrast(img) - - -def invert(img, **__): - return ImageOps.invert(img) - - -def equalize(img, **__): - return ImageOps.equalize(img) - - -def solarize(img, thresh, **__): - return ImageOps.solarize(img, thresh) - - -def solarize_add(img, add, thresh=128, **__): - lut = [] - for i in range(256): - if i < thresh: - lut.append(min(255, i + add)) - else: - lut.append(i) - if img.mode in ("L", "RGB"): - if img.mode == "RGB" and len(lut) == 256: - lut = lut + lut + lut - return img.point(lut) - else: - return img - - -def posterize(img, bits_to_keep, **__): - if bits_to_keep >= 8: - return img - return ImageOps.posterize(img, bits_to_keep) - - -def contrast(img, factor, **__): - return ImageEnhance.Contrast(img).enhance(factor) - - -def color(img, factor, **__): - return ImageEnhance.Color(img).enhance(factor) - - -def brightness(img, factor, **__): - return ImageEnhance.Brightness(img).enhance(factor) - - -def sharpness(img, factor, **__): - return ImageEnhance.Sharpness(img).enhance(factor) - - -def _randomly_negate(v): - """With 50% prob, negate the value""" - return -v if random.random() > 0.5 else v - - -def _rotate_level_to_arg(level, _hparams): - # range [-30, 30] - level = 
(level / _MAX_LEVEL) * 30. - level = _randomly_negate(level) - return level, - - -def _enhance_level_to_arg(level, _hparams): - # range [0.1, 1.9] - return (level / _MAX_LEVEL) * 1.8 + 0.1, - - -def _enhance_increasing_level_to_arg(level, _hparams): - # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend - # range [0.1, 1.9] - level = (level / _MAX_LEVEL) * .9 - level = 1.0 + _randomly_negate(level) - return level, - - -def _shear_level_to_arg(level, _hparams): - # range [-0.3, 0.3] - level = (level / _MAX_LEVEL) * 0.3 - level = _randomly_negate(level) - return level, - - -def _translate_abs_level_to_arg(level, hparams): - translate_const = hparams['translate_const'] - level = (level / _MAX_LEVEL) * float(translate_const) - level = _randomly_negate(level) - return level, - - -def _translate_rel_level_to_arg(level, hparams): - # default range [-0.45, 0.45] - translate_pct = hparams.get('translate_pct', 0.45) - level = (level / _MAX_LEVEL) * translate_pct - level = _randomly_negate(level) - return level, - - -def _posterize_level_to_arg(level, _hparams): - # As per Tensorflow TPU EfficientNet impl - # range [0, 4], 'keep 0 up to 4 MSB of original image' - # intensity/severity of augmentation decreases with level - return int((level / _MAX_LEVEL) * 4), - - -def _posterize_increasing_level_to_arg(level, hparams): - # As per Tensorflow models research and UDA impl - # range [4, 0], 'keep 4 down to 0 MSB of original image', - # intensity/severity of augmentation increases with level - return 4 - _posterize_level_to_arg(level, hparams)[0], - - -def _posterize_original_level_to_arg(level, _hparams): - # As per original AutoAugment paper description - # range [4, 8], 'keep 4 up to 8 MSB of image' - # intensity/severity of augmentation decreases with level - return int((level / _MAX_LEVEL) * 4) + 4, - - -def _solarize_level_to_arg(level, _hparams): - # range [0, 256] - # intensity/severity of augmentation decreases with level - return int((level / _MAX_LEVEL) * 256), - - -def _solarize_increasing_level_to_arg(level, _hparams): - # range [0, 256] - # intensity/severity of augmentation increases with level - return 256 - _solarize_level_to_arg(level, _hparams)[0], - - -def _solarize_add_level_to_arg(level, _hparams): - # range [0, 110] - return int((level / _MAX_LEVEL) * 110), - - -LEVEL_TO_ARG = { - 'AutoContrast': None, - 'Equalize': None, - 'Invert': None, - 'Rotate': _rotate_level_to_arg, - # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers - 'Posterize': _posterize_level_to_arg, - 'PosterizeIncreasing': _posterize_increasing_level_to_arg, - 'PosterizeOriginal': _posterize_original_level_to_arg, - 'Solarize': _solarize_level_to_arg, - 'SolarizeIncreasing': _solarize_increasing_level_to_arg, - 'SolarizeAdd': _solarize_add_level_to_arg, - 'Color': _enhance_level_to_arg, - 'ColorIncreasing': _enhance_increasing_level_to_arg, - 'Contrast': _enhance_level_to_arg, - 'ContrastIncreasing': _enhance_increasing_level_to_arg, - 'Brightness': _enhance_level_to_arg, - 'BrightnessIncreasing': _enhance_increasing_level_to_arg, - 'Sharpness': _enhance_level_to_arg, - 'SharpnessIncreasing': _enhance_increasing_level_to_arg, - 'ShearX': _shear_level_to_arg, - 'ShearY': _shear_level_to_arg, - 'TranslateX': _translate_abs_level_to_arg, - 'TranslateY': _translate_abs_level_to_arg, - 'TranslateXRel': _translate_rel_level_to_arg, - 'TranslateYRel': _translate_rel_level_to_arg, -} - - -NAME_TO_OP = { - 
'AutoContrast': auto_contrast, - 'Equalize': equalize, - 'Invert': invert, - 'Rotate': rotate, - 'Posterize': posterize, - 'PosterizeIncreasing': posterize, - 'PosterizeOriginal': posterize, - 'Solarize': solarize, - 'SolarizeIncreasing': solarize, - 'SolarizeAdd': solarize_add, - 'Color': color, - 'ColorIncreasing': color, - 'Contrast': contrast, - 'ContrastIncreasing': contrast, - 'Brightness': brightness, - 'BrightnessIncreasing': brightness, - 'Sharpness': sharpness, - 'SharpnessIncreasing': sharpness, - 'ShearX': shear_x, - 'ShearY': shear_y, - 'TranslateX': translate_x_abs, - 'TranslateY': translate_y_abs, - 'TranslateXRel': translate_x_rel, - 'TranslateYRel': translate_y_rel, -} - - -class AugmentOp: - - def __init__(self, name, prob=0.5, magnitude=10, hparams=None): - hparams = hparams or _HPARAMS_DEFAULT - self.aug_fn = NAME_TO_OP[name] - self.level_fn = LEVEL_TO_ARG[name] - self.prob = prob - self.magnitude = magnitude - self.hparams = hparams.copy() - self.kwargs = dict( - fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, - resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, - ) - - # If magnitude_std is > 0, we introduce some randomness - # in the usually fixed policy and sample magnitude from a normal distribution - # with mean `magnitude` and std-dev of `magnitude_std`. - # NOTE This is my own hack, being tested, not in papers or reference impls. - self.magnitude_std = self.hparams.get('magnitude_std', 0) - - def __call__(self, img): - if self.prob < 1.0 and random.random() > self.prob: - return img - magnitude = self.magnitude - if self.magnitude_std and self.magnitude_std > 0: - magnitude = random.gauss(magnitude, self.magnitude_std) - magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range - level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() - return self.aug_fn(img, *level_args, **self.kwargs) - - -def auto_augment_policy_v0(hparams): - # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. 
- policy = [ - [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], - [('Color', 0.4, 9), ('Equalize', 0.6, 3)], - [('Color', 0.4, 1), ('Rotate', 0.6, 8)], - [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], - [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], - [('Color', 0.2, 0), ('Equalize', 0.8, 8)], - [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], - [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], - [('Color', 0.6, 1), ('Equalize', 1.0, 2)], - [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], - [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], - [('Color', 0.4, 7), ('Equalize', 0.6, 0)], - [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], - [('Solarize', 0.6, 8), ('Color', 0.6, 9)], - [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], - [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], - [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], - [('ShearY', 0.8, 0), ('Color', 0.6, 4)], - [('Color', 1.0, 0), ('Rotate', 0.6, 2)], - [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], - [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], - [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], - [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize - [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], - [('Color', 0.8, 6), ('Rotate', 0.4, 5)], - ] - pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] - return pc - - -def auto_augment_policy_v0r(hparams): - # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used - # in Google research implementation (number of bits discarded increases with magnitude) - policy = [ - [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], - [('Color', 0.4, 9), ('Equalize', 0.6, 3)], - [('Color', 0.4, 1), ('Rotate', 0.6, 8)], - [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], - [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], - [('Color', 0.2, 0), ('Equalize', 0.8, 8)], - [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], - [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], - [('Color', 0.6, 1), ('Equalize', 1.0, 2)], - [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], - [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], - [('Color', 0.4, 7), ('Equalize', 0.6, 0)], - [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], - [('Solarize', 0.6, 8), ('Color', 0.6, 9)], - [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], - [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], - [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], - [('ShearY', 0.8, 0), ('Color', 0.6, 4)], - [('Color', 1.0, 0), ('Rotate', 0.6, 2)], - [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], - [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], - [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], - [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], - [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], - [('Color', 0.8, 6), ('Rotate', 0.4, 5)], - ] - pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] - return pc - - -def auto_augment_policy_original(hparams): - # ImageNet policy from https://arxiv.org/abs/1805.09501 - policy = [ - [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], - [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], - [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], - [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], - [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], - [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], - [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], - [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], - [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], - [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], - [('Rotate', 0.8, 8), ('Color', 0.4, 0)], - [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], - 
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], - [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], - [('Color', 0.6, 4), ('Contrast', 1.0, 8)], - [('Rotate', 0.8, 8), ('Color', 1.0, 2)], - [('Color', 0.8, 8), ('Solarize', 0.8, 7)], - [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], - [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], - [('Color', 0.4, 0), ('Equalize', 0.6, 3)], - [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], - [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], - [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], - [('Color', 0.6, 4), ('Contrast', 1.0, 8)], - [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], - ] - pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] - return pc - - -def auto_augment_policy_originalr(hparams): - # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation - policy = [ - [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], - [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], - [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], - [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], - [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], - [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], - [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], - [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], - [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], - [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], - [('Rotate', 0.8, 8), ('Color', 0.4, 0)], - [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], - [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], - [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], - [('Color', 0.6, 4), ('Contrast', 1.0, 8)], - [('Rotate', 0.8, 8), ('Color', 1.0, 2)], - [('Color', 0.8, 8), ('Solarize', 0.8, 7)], - [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], - [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], - [('Color', 0.4, 0), ('Equalize', 0.6, 3)], - [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], - [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], - [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], - [('Color', 0.6, 4), ('Contrast', 1.0, 8)], - [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], - ] - pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] - return pc - - -def auto_augment_policy(name='v0', hparams=None): - hparams = hparams or _HPARAMS_DEFAULT - if name == 'original': - return auto_augment_policy_original(hparams) - elif name == 'originalr': - return auto_augment_policy_originalr(hparams) - elif name == 'v0': - return auto_augment_policy_v0(hparams) - elif name == 'v0r': - return auto_augment_policy_v0r(hparams) - else: - assert False, 'Unknown AA policy (%s)' % name - - -class AutoAugment: - - def __init__(self, policy): - self.policy = policy - - def __call__(self, img): - sub_policy = random.choice(self.policy) - for op in sub_policy: - img = op(img) - return img - - -def auto_augment_transform(config_str, hparams): - """ - Create a AutoAugment transform - :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by - dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). 
- The remaining sections, not order sepecific determine - 'mstd' - float std deviation of magnitude noise applied - Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 - :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme - :return: A PyTorch compatible Transform - """ - config = config_str.split('-') - policy_name = config[0] - config = config[1:] - for c in config: - cs = re.split(r'(\d.*)', c) - if len(cs) < 2: - continue - key, val = cs[:2] - if key == 'mstd': - # noise param injected via hparams for now - hparams.setdefault('magnitude_std', float(val)) - else: - assert False, 'Unknown AutoAugment config section' - aa_policy = auto_augment_policy(policy_name, hparams=hparams) - return AutoAugment(aa_policy) - - -_RAND_TRANSFORMS = [ - 'AutoContrast', - 'Equalize', - 'Invert', - 'Rotate', - 'Posterize', - 'Solarize', - 'SolarizeAdd', - 'Color', - 'Contrast', - 'Brightness', - 'Sharpness', - 'ShearX', - 'ShearY', - 'TranslateXRel', - 'TranslateYRel', - #'Cutout' # NOTE I've implement this as random erasing separately -] - - -_RAND_INCREASING_TRANSFORMS = [ - 'AutoContrast', - 'Equalize', - 'Invert', - 'Rotate', - 'PosterizeIncreasing', - 'SolarizeIncreasing', - 'SolarizeAdd', - 'ColorIncreasing', - 'ContrastIncreasing', - 'BrightnessIncreasing', - 'SharpnessIncreasing', - 'ShearX', - 'ShearY', - 'TranslateXRel', - 'TranslateYRel', - #'Cutout' # NOTE I've implement this as random erasing separately -] - - - -# These experimental weights are based loosely on the relative improvements mentioned in paper. -# They may not result in increased performance, but could likely be tuned to so. -_RAND_CHOICE_WEIGHTS_0 = { - 'Rotate': 0.3, - 'ShearX': 0.2, - 'ShearY': 0.2, - 'TranslateXRel': 0.1, - 'TranslateYRel': 0.1, - 'Color': .025, - 'Sharpness': 0.025, - 'AutoContrast': 0.025, - 'Solarize': .005, - 'SolarizeAdd': .005, - 'Contrast': .005, - 'Brightness': .005, - 'Equalize': .005, - 'Posterize': 0, - 'Invert': 0, -} - - -def _select_rand_weights(weight_idx=0, transforms=None): - transforms = transforms or _RAND_TRANSFORMS - assert weight_idx == 0 # only one set of weights currently - rand_weights = _RAND_CHOICE_WEIGHTS_0 - probs = [rand_weights[k] for k in transforms] - probs /= np.sum(probs) - return probs - - -def rand_augment_ops(magnitude=10, hparams=None, transforms=None): - hparams = hparams or _HPARAMS_DEFAULT - transforms = transforms or _RAND_TRANSFORMS - return [AugmentOp( - name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms] - - -class RandAugment: - def __init__(self, ops, num_layers=2, choice_weights=None): - self.ops = ops - self.num_layers = num_layers - self.choice_weights = choice_weights - - def __call__(self, img): - # no replacement when using weighted choice - ops = np.random.choice( - self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights) - for op in ops: - img = op(img) - return img - - -def rand_augment_transform(config_str, hparams): - """ - Create a RandAugment transform - :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by - dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining - sections, not order sepecific determine - 'm' - integer magnitude of rand augment - 'n' - integer num layers (number of transform ops selected per image) - 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) - 'mstd' - float std deviation of magnitude noise applied - 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) - Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 - 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 - :param hparams: Other hparams (kwargs) for the RandAugmentation scheme - :return: A PyTorch compatible Transform - """ - magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) - num_layers = 2 # default to 2 ops per image - weight_idx = None # default to no probability weights for op choice - transforms = _RAND_TRANSFORMS - config = config_str.split('-') - assert config[0] == 'rand' - config = config[1:] - for c in config: - cs = re.split(r'(\d.*)', c) - if len(cs) < 2: - continue - key, val = cs[:2] - if key == 'mstd': - # noise param injected via hparams for now - hparams.setdefault('magnitude_std', float(val)) - elif key == 'inc': - if bool(val): - transforms = _RAND_INCREASING_TRANSFORMS - elif key == 'm': - magnitude = int(val) - elif key == 'n': - num_layers = int(val) - elif key == 'w': - weight_idx = int(val) - else: - assert False, 'Unknown RandAugment config section' - ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms) - choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx) - return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) - - -_AUGMIX_TRANSFORMS = [ - 'AutoContrast', - 'ColorIncreasing', # not in paper - 'ContrastIncreasing', # not in paper - 'BrightnessIncreasing', # not in paper - 'SharpnessIncreasing', # not in paper - 'Equalize', - 'Rotate', - 'PosterizeIncreasing', - 'SolarizeIncreasing', - 'ShearX', - 'ShearY', - 'TranslateXRel', - 'TranslateYRel', -] - - -def augmix_ops(magnitude=10, hparams=None, transforms=None): - hparams = hparams or _HPARAMS_DEFAULT - transforms = transforms or _AUGMIX_TRANSFORMS - return [AugmentOp( - name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms] - - -class AugMixAugment: - """ AugMix Transform - Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py - From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - - https://arxiv.org/abs/1912.02781 - """ - def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): - self.ops = ops - self.alpha = alpha - self.width = width - self.depth = depth - self.blended = blended # blended mode is faster but not well tested - - def _calc_blended_weights(self, ws, m): - ws = ws * m - cump = 1. - rws = [] - for w in ws[::-1]: - alpha = w / cump - cump *= (1 - alpha) - rws.append(alpha) - return np.array(rws[::-1], dtype=np.float32) - - def _apply_blended(self, img, mixing_weights, m): - # This is my first crack and implementing a slightly faster mixed augmentation. Instead - # of accumulating the mix for each chain in a Numpy array and then blending with original, - # it recomputes the blending coefficients and applies one PIL image blend per chain. - # TODO the results appear in the right ballpark but they differ by more than rounding. 
- img_orig = img.copy() - ws = self._calc_blended_weights(mixing_weights, m) - for w in ws: - depth = self.depth if self.depth > 0 else np.random.randint(1, 4) - ops = np.random.choice(self.ops, depth, replace=True) - img_aug = img_orig # no ops are in-place, deep copy not necessary - for op in ops: - img_aug = op(img_aug) - img = Image.blend(img, img_aug, w) - return img - - def _apply_basic(self, img, mixing_weights, m): - # This is a literal adaptation of the paper/official implementation without normalizations and - # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the - # typical augmentation transforms, could use a GPU / Kornia implementation. - img_shape = img.size[0], img.size[1], len(img.getbands()) - mixed = np.zeros(img_shape, dtype=np.float32) - for mw in mixing_weights: - depth = self.depth if self.depth > 0 else np.random.randint(1, 4) - ops = np.random.choice(self.ops, depth, replace=True) - img_aug = img # no ops are in-place, deep copy not necessary - for op in ops: - img_aug = op(img_aug) - mixed += mw * np.asarray(img_aug, dtype=np.float32) - np.clip(mixed, 0, 255., out=mixed) - mixed = Image.fromarray(mixed.astype(np.uint8)) - return Image.blend(img, mixed, m) - - def __call__(self, img): - mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) - m = np.float32(np.random.beta(self.alpha, self.alpha)) - if self.blended: - mixed = self._apply_blended(img, mixing_weights, m) - else: - mixed = self._apply_basic(img, mixing_weights, m) - return mixed - - -def augment_and_mix_transform(config_str, hparams): - """ Create AugMix PyTorch transform - :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by - dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining - sections, not order sepecific determine - 'm' - integer magnitude (severity) of augmentation mix (default: 3) - 'w' - integer width of augmentation chain (default: 3) - 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) - 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) - 'mstd' - float std deviation of magnitude noise applied (default: 0) - Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 - :param hparams: Other hparams (kwargs) for the Augmentation transforms - :return: A PyTorch compatible Transform - """ - magnitude = 3 - width = 3 - depth = -1 - alpha = 1. 
- blended = False - config = config_str.split('-') - assert config[0] == 'augmix' - config = config[1:] - for c in config: - cs = re.split(r'(\d.*)', c) - if len(cs) < 2: - continue - key, val = cs[:2] - if key == 'mstd': - # noise param injected via hparams for now - hparams.setdefault('magnitude_std', float(val)) - elif key == 'm': - magnitude = int(val) - elif key == 'w': - width = int(val) - elif key == 'd': - depth = int(val) - elif key == 'a': - alpha = float(val) - elif key == 'b': - blended = bool(val) - else: - assert False, 'Unknown AugMix config section' - ops = augmix_ops(magnitude=magnitude, hparams=hparams) - return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended) \ No newline at end of file diff --git a/examples/nas/cream/dataset/base_dataset.py b/examples/nas/cream/dataset/base_dataset.py deleted file mode 100755 index 222caffda3..0000000000 --- a/examples/nas/cream/dataset/base_dataset.py +++ /dev/null @@ -1,197 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch.utils.data as data - -import os -import re -import torch -import tarfile -from PIL import Image - - -IMG_EXTENSIONS = ['.png', '.jpg', '.jpeg'] - - -def natural_key(string_): - """See http://www.codinghorror.com/blog/archives/001018.html""" - return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] - - -def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): - labels = [] - filenames = [] - for root, subdirs, files in os.walk(folder, topdown=False): - rel_path = os.path.relpath(root, folder) if (root != folder) else '' - label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') - for f in files: - base, ext = os.path.splitext(f) - if ext.lower() in types: - filenames.append(os.path.join(root, f)) - labels.append(label) - if class_to_idx is None: - # building class index - unique_labels = set(labels) - sorted_labels = list(sorted(unique_labels, key=natural_key)) - class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} - images_and_targets = zip(filenames, [class_to_idx[l] for l in labels]) - if sort: - images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) - return images_and_targets, class_to_idx - - -def load_class_map(filename, root=''): - class_to_idx = {} - class_map_path = filename - if not os.path.exists(class_map_path): - class_map_path = os.path.join(root, filename) - assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename - class_map_ext = os.path.splitext(filename)[-1].lower() - if class_map_ext == '.txt': - with open(class_map_path) as f: - class_to_idx = {v.strip(): k for k, v in enumerate(f)} - else: - assert False, 'Unsupported class map extension' - return class_to_idx - - -class Dataset(data.Dataset): - - def __init__( - self, - root, - load_bytes=False, - transform=None, - class_map=''): - - class_to_idx = None - if class_map: - class_to_idx = load_class_map(class_map, root) - images, class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) - if len(images) == 0: - raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - self.root = root - self.samples = images - self.imgs = self.samples # torchvision ImageFolder compat - self.class_to_idx = class_to_idx - self.load_bytes = load_bytes - self.transform = 
transform - - def __getitem__(self, index): - path, target = self.samples[index] - img = open(path, 'rb').read() if self.load_bytes else Image.open(path).convert('RGB') - if self.transform is not None: - img = self.transform(img) - if target is None: - target = torch.zeros(1).long() - return img, target - - def __len__(self): - return len(self.imgs) - - def filenames(self, indices=[], basename=False): - if indices: - if basename: - return [os.path.basename(self.samples[i][0]) for i in indices] - else: - return [self.samples[i][0] for i in indices] - else: - if basename: - return [os.path.basename(x[0]) for x in self.samples] - else: - return [x[0] for x in self.samples] - - -def _extract_tar_info(tarfile, class_to_idx=None, sort=True): - files = [] - labels = [] - for ti in tarfile.getmembers(): - if not ti.isfile(): - continue - dirname, basename = os.path.split(ti.path) - label = os.path.basename(dirname) - ext = os.path.splitext(basename)[1] - if ext.lower() in IMG_EXTENSIONS: - files.append(ti) - labels.append(label) - if class_to_idx is None: - unique_labels = set(labels) - sorted_labels = list(sorted(unique_labels, key=natural_key)) - class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} - tarinfo_and_targets = zip(files, [class_to_idx[l] for l in labels]) - if sort: - tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) - return tarinfo_and_targets, class_to_idx - - -class DatasetTar(data.Dataset): - - def __init__(self, root, load_bytes=False, transform=None, class_map=''): - - class_to_idx = None - if class_map: - class_to_idx = load_class_map(class_map, root) - assert os.path.isfile(root) - self.root = root - with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later - self.samples, self.class_to_idx = _extract_tar_info(tf, class_to_idx) - self.tarfile = None # lazy init in __getitem__ - self.load_bytes = load_bytes - self.transform = transform - - def __getitem__(self, index): - if self.tarfile is None: - self.tarfile = tarfile.open(self.root) - tarinfo, target = self.samples[index] - iob = self.tarfile.extractfile(tarinfo) - img = iob.read() if self.load_bytes else Image.open(iob).convert('RGB') - if self.transform is not None: - img = self.transform(img) - if target is None: - target = torch.zeros(1).long() - return img, target - - def __len__(self): - return len(self.samples) - - -class AugMixDataset(torch.utils.data.Dataset): - """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" - - def __init__(self, dataset, num_splits=2): - self.augmentation = None - self.normalize = None - self.dataset = dataset - if self.dataset.transform is not None: - self._set_transforms(self.dataset.transform) - self.num_splits = num_splits - - def _set_transforms(self, x): - assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' - self.dataset.transform = x[0] - self.augmentation = x[1] - self.normalize = x[2] - - @property - def transform(self): - return self.dataset.transform - - @transform.setter - def transform(self, x): - self._set_transforms(x) - - def _normalize(self, x): - return x if self.normalize is None else self.normalize(x) - - def __getitem__(self, i): - x, y = self.dataset[i] # all splits share the same dataset base transform - x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) - # run the full augmentation on the remaining splits - for _ in range(self.num_splits - 1): - 
x_list.append(self._normalize(self.augmentation(x))) - return tuple(x_list), y - - def __len__(self): - return len(self.dataset) \ No newline at end of file diff --git a/examples/nas/cream/dataset/loader.py b/examples/nas/cream/dataset/loader.py deleted file mode 100755 index 7f00135879..0000000000 --- a/examples/nas/cream/dataset/loader.py +++ /dev/null @@ -1,241 +0,0 @@ -import torch.utils.data -import numpy as np - -from dataset.transform import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, create_transform -from dataset.utils import RandomErasing - -import math -import torch -from torch.utils.data import Sampler -import torch.distributed as dist - - -class OrderedDistributedSampler(Sampler): - """Sampler that restricts data loading to a subset of the dataset. - It is especially useful in conjunction with - :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each - process can pass a DistributedSampler instance as a DataLoader sampler, - and load a subset of the original dataset that is exclusive to it. - .. note:: - Dataset is assumed to be of constant size. - Arguments: - dataset: Dataset used for sampling. - num_replicas (optional): Number of processes participating in - distributed training. - rank (optional): Rank of the current process within num_replicas. - """ - - def __init__(self, dataset, num_replicas=None, rank=None): - if num_replicas is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() - if rank is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - indices = list(range(len(self.dataset))) - - # add extra samples to make it evenly divisible - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - indices = indices[self.rank:self.total_size:self.num_replicas] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - -def fast_collate(batch): - """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" - assert isinstance(batch[0], tuple) - batch_size = len(batch) - if isinstance(batch[0][0], tuple): - # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position - # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position - inner_tuple_size = len(batch[0][0]) - flattened_batch_size = batch_size * inner_tuple_size - targets = torch.zeros(flattened_batch_size, dtype=torch.int64) - tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) - for i in range(batch_size): - assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length - for j in range(inner_tuple_size): - targets[i + j * batch_size] = batch[i][1] - tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) - return tensor, targets - elif isinstance(batch[0][0], np.ndarray): - targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) - assert len(targets) == batch_size - tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) - for i in range(batch_size): - 
tensor[i] += torch.from_numpy(batch[i][0]) - return tensor, targets - elif isinstance(batch[0][0], torch.Tensor): - targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) - assert len(targets) == batch_size - tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) - for i in range(batch_size): - tensor[i].copy_(batch[i][0]) - return tensor, targets - else: - assert False - - -class PrefetchLoader: - - def __init__(self, - loader, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - fp16=False, - re_prob=0., - re_mode='const', - re_count=1, - re_num_splits=0): - self.loader = loader - self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1) - self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1) - self.fp16 = fp16 - if fp16: - self.mean = self.mean.half() - self.std = self.std.half() - if re_prob > 0.: - self.random_erasing = RandomErasing( - probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits) - else: - self.random_erasing = None - - def __iter__(self): - stream = torch.cuda.Stream() - first = True - input = target = None - - for next_input, next_target in self.loader: - with torch.cuda.stream(stream): - next_input = next_input.cuda(non_blocking=True) - next_target = next_target.cuda(non_blocking=True) - if self.fp16: - next_input = next_input.half().sub_(self.mean).div_(self.std) - else: - next_input = next_input.float().sub_(self.mean).div_(self.std) - if self.random_erasing is not None: - next_input = self.random_erasing(next_input) - - if not first: - yield input, target - else: - first = False - - torch.cuda.current_stream().wait_stream(stream) - input = next_input - target = next_target - - yield input, target - - def __len__(self): - return len(self.loader) - - @property - def sampler(self): - return self.loader.sampler - - @property - def dataset(self): - return self.loader.dataset - - -def create_loader( - dataset, - input_size, - batch_size, - is_training=False, - use_prefetcher=True, - re_prob=0., - re_mode='const', - re_count=1, - re_split=False, - color_jitter=0.4, - auto_augment=None, - num_aug_splits=0, - interpolation='bilinear', - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - num_workers=1, - distributed=False, - crop_pct=None, - collate_fn=None, - pin_memory=False, - fp16=False, - tf_preprocessing=False, - use_multi_epochs_loader=False -): - re_num_splits = 0 - if re_split: - # apply RE to second half of batch if no aug split otherwise line up with aug split - re_num_splits = num_aug_splits or 2 - dataset.transform = create_transform( - input_size, - is_training=is_training, - use_prefetcher=use_prefetcher, - color_jitter=color_jitter, - auto_augment=auto_augment, - interpolation=interpolation, - mean=mean, - std=std, - crop_pct=crop_pct, - tf_preprocessing=tf_preprocessing, - re_prob=re_prob, - re_mode=re_mode, - re_count=re_count, - re_num_splits=re_num_splits, - separate=num_aug_splits > 0, - ) - - sampler = None - if distributed: - if is_training: - sampler = torch.utils.data.distributed.DistributedSampler(dataset) - else: - # This will add extra duplicate entries to result in equal num - # of samples per-process, will slightly alter validation results - sampler = OrderedDistributedSampler(dataset) - - if collate_fn is None: - collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate - - loader_class = torch.utils.data.DataLoader - - loader = loader_class( - dataset, - batch_size=batch_size, - shuffle=sampler is None and 
is_training, - num_workers=num_workers, - sampler=sampler, - collate_fn=collate_fn, - pin_memory=pin_memory, - drop_last=is_training, - ) - if use_prefetcher: - loader = PrefetchLoader( - loader, - mean=mean, - std=std, - fp16=fp16, - re_prob=re_prob if is_training else 0., - re_mode=re_mode, - re_count=re_count, - re_num_splits=re_num_splits - ) - - return loader diff --git a/examples/nas/cream/dataset/processing.py b/examples/nas/cream/dataset/processing.py deleted file mode 100755 index a6f3dbf219..0000000000 --- a/examples/nas/cream/dataset/processing.py +++ /dev/null @@ -1,204 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf -import numpy as np - -IMAGE_SIZE = 224 -CROP_PADDING = 32 - - -def distorted_bounding_box_crop(image_bytes, - bbox, - min_object_covered=0.1, - aspect_ratio_range=(0.75, 1.33), - area_range=(0.05, 1.0), - max_attempts=100, - scope=None): - """Generates cropped_image using one of the bboxes randomly distorted. - See `tf.image.sample_distorted_bounding_box` for more documentation. - Args: - image_bytes: `Tensor` of binary image data. - bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` - where each coordinate is [0, 1) and the coordinates are arranged - as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole - image. - min_object_covered: An optional `float`. Defaults to `0.1`. The cropped - area of the image must contain at least this fraction of any bounding - box supplied. - aspect_ratio_range: An optional list of `float`s. The cropped area of the - image must have an aspect ratio = width / height within this range. - area_range: An optional list of `float`s. The cropped area of the image - must contain a fraction of the supplied image within in this range. - max_attempts: An optional `int`. Number of attempts at generating a cropped - region of the image of the specified constraints. After `max_attempts` - failures, return the entire image. - scope: Optional `str` for name scope. - Returns: - cropped image `Tensor` - """ - with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): - shape = tf.image.extract_jpeg_shape(image_bytes) - sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - shape, - bounding_boxes=bbox, - min_object_covered=min_object_covered, - aspect_ratio_range=aspect_ratio_range, - area_range=area_range, - max_attempts=max_attempts, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, _ = sample_distorted_bounding_box - - # Crop the image to the specified bounding box. - offset_y, offset_x, _ = tf.unstack(bbox_begin) - target_height, target_width, _ = tf.unstack(bbox_size) - crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) - image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) - - return image - - -def _at_least_x_are_equal(a, b, x): - """At least `x` of `a` and `b` `Tensors` are equal.""" - match = tf.equal(a, b) - match = tf.cast(match, tf.int32) - return tf.greater_equal(tf.reduce_sum(match), x) - - -def _decode_and_random_crop(image_bytes, image_size, resize_method): - """Make a random crop of image_size.""" - bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) - image = distorted_bounding_box_crop( - image_bytes, - bbox, - min_object_covered=0.1, - aspect_ratio_range=(3. / 4, 4. 
/ 3.), - area_range=(0.08, 1.0), - max_attempts=10, - scope=None) - original_shape = tf.image.extract_jpeg_shape(image_bytes) - bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) - - image = tf.cond( - bad, - lambda: _decode_and_center_crop(image_bytes, image_size), - lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0]) - - return image - - -def _decode_and_center_crop(image_bytes, image_size, resize_method): - """Crops to center of image with padding then scales image_size.""" - shape = tf.image.extract_jpeg_shape(image_bytes) - image_height = shape[0] - image_width = shape[1] - - padded_center_crop_size = tf.cast( - ((image_size / (image_size + CROP_PADDING)) * - tf.cast(tf.minimum(image_height, image_width), tf.float32)), - tf.int32) - - offset_height = ((image_height - padded_center_crop_size) + 1) // 2 - offset_width = ((image_width - padded_center_crop_size) + 1) // 2 - crop_window = tf.stack([offset_height, offset_width, - padded_center_crop_size, padded_center_crop_size]) - image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) - image = tf.image.resize([image], [image_size, image_size], resize_method)[0] - - return image - - -def _flip(image): - """Random horizontal image flip.""" - image = tf.image.random_flip_left_right(image) - return image - - -def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): - """Preprocesses the given image for evaluation. - Args: - image_bytes: `Tensor` representing an image binary of arbitrary size. - use_bfloat16: `bool` for whether to use bfloat16. - image_size: image size. - interpolation: image interpolation method - Returns: - A preprocessed image `Tensor`. - """ - resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR - image = _decode_and_random_crop(image_bytes, image_size, resize_method) - image = _flip(image) - image = tf.reshape(image, [image_size, image_size, 3]) - image = tf.image.convert_image_dtype( - image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) - return image - - -def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): - """Preprocesses the given image for evaluation. - Args: - image_bytes: `Tensor` representing an image binary of arbitrary size. - use_bfloat16: `bool` for whether to use bfloat16. - image_size: image size. - interpolation: image interpolation method - Returns: - A preprocessed image `Tensor`. - """ - resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR - image = _decode_and_center_crop(image_bytes, image_size, resize_method) - image = tf.reshape(image, [image_size, image_size, 3]) - image = tf.image.convert_image_dtype( - image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) - return image - - -def preprocess_image(image_bytes, - is_training=False, - use_bfloat16=False, - image_size=IMAGE_SIZE, - interpolation='bicubic'): - """Preprocesses the given image. - Args: - image_bytes: `Tensor` representing an image binary of arbitrary size. - is_training: `bool` for whether the preprocessing is for training. - use_bfloat16: `bool` for whether to use bfloat16. - image_size: image size. - interpolation: image interpolation method - Returns: - A preprocessed image `Tensor` with value range of [0, 255]. 
- """ - if is_training: - return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) - else: - return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) - - -class TfPreprocessTransform: - - def __init__(self, is_training=False, size=224, interpolation='bicubic'): - self.is_training = is_training - self.size = size[0] if isinstance(size, tuple) else size - self.interpolation = interpolation - self._image_bytes = None - self.process_image = self._build_tf_graph() - self.sess = None - - def _build_tf_graph(self): - with tf.device('/cpu:0'): - self._image_bytes = tf.placeholder( - shape=[], - dtype=tf.string, - ) - img = preprocess_image( - self._image_bytes, self.is_training, False, self.size, self.interpolation) - return img - - def __call__(self, image_bytes): - if self.sess is None: - self.sess = tf.Session() - img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) - img = img.round().clip(0, 255).astype(np.uint8) - if img.ndim < 3: - img = np.expand_dims(img, axis=-1) - img = np.rollaxis(img, 2) # HWC to CHW - return img \ No newline at end of file diff --git a/examples/nas/cream/dataset/tiny_imagenet.py b/examples/nas/cream/dataset/tiny_imagenet.py deleted file mode 100755 index cedafe4810..0000000000 --- a/examples/nas/cream/dataset/tiny_imagenet.py +++ /dev/null @@ -1,166 +0,0 @@ -from __future__ import print_function -import os -import os.path -import errno -import torch -import numpy as np -import sys -import cv2 -from PIL import Image - -import torch.utils.data as data -import torchvision.transforms as transforms -from torchvision.datasets.utils import download_url, check_integrity - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def find_classes(class_file): - with open(class_file) as r: - classes = map(lambda s: s.strip(), r.readlines()) - - # classes.sort() - # class_to_idx = {classes[i]: i for i in range(len(classes))} - - class_to_idx = {iclass: i for i, iclass in enumerate(classes)} - - return classes, class_to_idx - - -def loadPILImage(path): - trans_img = Image.open(path).convert('RGB') - return trans_img - -def loadCVImage(path): - img = cv2.imread(path, cv2.IMREAD_COLOR) - trans_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - return Image.fromarray(trans_img.astype('uint8'), 'RGB') - -def make_dataset(root, base_folder, dirname, class_to_idx): - images = [] - dir_path = os.path.join(root, base_folder) - - if dirname == 'train': - for fname in sorted(os.listdir(dir_path)): - cls_fpath = os.path.join(dir_path, fname) - if os.path.isdir(cls_fpath): - imgfnames = sorted(os.listdir(cls_fpath))[:250] - for imgname in imgfnames: - if is_image_file(imgname): - path = os.path.join(cls_fpath, imgname) - item = (path, class_to_idx[fname]) - images.append(item) - elif dirname == 'val': - for fname in sorted(os.listdir(dir_path)): - cls_fpath = os.path.join(dir_path, fname) - if os.path.isdir(cls_fpath): - imgfnames = sorted(os.listdir(cls_fpath))[250:350] - for imgname in imgfnames: - if is_image_file(imgname): - path = os.path.join(cls_fpath, imgname) - item = (path, class_to_idx[fname]) - images.append(item) - - return images - -class NewImageNet(data.Dataset): - - base_folder = 'new_dataset' - def __init__(self, root, train=True, - target_transform=None, - test=False, loader='opencv'): - self.root = os.path.expanduser(root) - if train: - 
self.transform = transforms.Compose([ - # transforms.RandomCrop(64, padding=4), - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), - ]) - else: - self.transform = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), - ]) - - self.target_transform = target_transform - self.train = train # training set or test set - self.loader = loader - - _, class_to_idx = find_classes(os.path.join(self.root, self.base_folder, 'info.txt')) - # self.classes = classes - - if self.train: - dirname = 'train' - else: - dirname = 'val' - - self.class_to_idx = class_to_idx - self.idx_to_class = dict() - for idx, key in enumerate(class_to_idx.keys()): - self.idx_to_class[idx] = key - - self.data_info = make_dataset(self.root, self.base_folder, dirname, class_to_idx) - - if len(self.data_info) == 0: - raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n" - "Supported image extensions are: " + ",".join( - IMG_EXTENSIONS))) - - def __getitem__(self, index): - """ - Args: - index (int): Index - Returns: - tuple: (img_path, target) where target is index of the target class. - """ - - img_path, target = self.data_info[index][0], self.data_info[index][1] - - if self.loader == 'pil': - img = loadPILImage(img_path) - else: - img = loadCVImage(img_path) - - if self.transform is not None: - result_img = self.transform(img) - - if self.target_transform is not None: - target = self.target_transform(target) - - return result_img, target - - def __len__(self): - return len(self.data_info) - - -def get_newimagenet(dir, batch_size): - train_data = NewImageNet(root=dir, train=True) - test_data = NewImageNet(root=dir, train=False) - - test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) - train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) - - test_loader = torch.utils.data.DataLoader( - test_data, batch_size=batch_size, - sampler=test_sampler, - pin_memory=True, num_workers=16) - - train_loader = torch.utils.data.DataLoader( - train_data, batch_size=batch_size, - sampler=train_sampler, - pin_memory=True, num_workers=16) - - return [train_loader, test_loader], [train_sampler, test_sampler] - - diff --git a/examples/nas/cream/dataset/transform.py b/examples/nas/cream/dataset/transform.py deleted file mode 100755 index 6625143eaf..0000000000 --- a/examples/nas/cream/dataset/transform.py +++ /dev/null @@ -1,182 +0,0 @@ -import math - -import torch -from torchvision import transforms - -DEFAULT_CROP_PCT = 0.875 -IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) -IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) - -from dataset.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform -from dataset.utils import RandomErasing, _pil_interp, RandomResizedCropAndInterpolation, ToNumpy - -def transforms_imagenet_train( - img_size=224, - scale=(0.08, 1.0), - color_jitter=0.4, - auto_augment=None, - interpolation='random', - use_prefetcher=False, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - re_prob=0., - re_mode='const', - re_count=1, - re_num_splits=0, - separate=False, -): - """ - If separate==True, the transforms are returned as a tuple of 3 separate transforms - for use in a mixing dataset that passes - * all data through the first (primary) transform, called the 'clean' data - * a portion 
of the data through the secondary transform - * normalizes and converts the branches above with the third, final transform - """ - primary_tfl = [ - RandomResizedCropAndInterpolation( - img_size, scale=scale, interpolation=interpolation), - transforms.RandomHorizontalFlip() - ] - - secondary_tfl = [] - if auto_augment: - assert isinstance(auto_augment, str) - if isinstance(img_size, tuple): - img_size_min = min(img_size) - else: - img_size_min = img_size - aa_params = dict( - translate_const=int(img_size_min * 0.45), - img_mean=tuple([min(255, round(255 * x)) for x in mean]), - ) - if interpolation and interpolation != 'random': - aa_params['interpolation'] = _pil_interp(interpolation) - if auto_augment.startswith('rand'): - secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] - elif auto_augment.startswith('augmix'): - aa_params['translate_pct'] = 0.3 - secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] - else: - secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] - elif color_jitter is not None: - # color jitter is enabled when not using AA - if isinstance(color_jitter, (list, tuple)): - # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation - # or 4 if also augmenting hue - assert len(color_jitter) in (3, 4) - else: - # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue - color_jitter = (float(color_jitter),) * 3 - secondary_tfl += [transforms.ColorJitter(*color_jitter)] - - final_tfl = [] - if use_prefetcher: - # prefetcher and collate will handle tensor conversion and norm - final_tfl += [ToNumpy()] - else: - final_tfl += [ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std)) - ] - if re_prob > 0.: - final_tfl.append( - RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) - - if separate: - return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) - else: - return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) - - -def transforms_imagenet_eval( - img_size=224, - crop_pct=None, - interpolation='bilinear', - use_prefetcher=False, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD): - crop_pct = crop_pct or DEFAULT_CROP_PCT - - if isinstance(img_size, tuple): - assert len(img_size) == 2 - if img_size[-1] == img_size[-2]: - # fall-back to older behaviour so Resize scales to shortest edge if target is square - scale_size = int(math.floor(img_size[0] / crop_pct)) - else: - scale_size = tuple([int(x / crop_pct) for x in img_size]) - else: - scale_size = int(math.floor(img_size / crop_pct)) - - tfl = [ - transforms.Resize(scale_size, _pil_interp(interpolation)), - transforms.CenterCrop(img_size), - ] - if use_prefetcher: - # prefetcher and collate will handle tensor conversion and norm - tfl += [ToNumpy()] - else: - tfl += [ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std)) - ] - - return transforms.Compose(tfl) - - -def create_transform( - input_size, - is_training=False, - use_prefetcher=False, - color_jitter=0.4, - auto_augment=None, - interpolation='bilinear', - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - re_prob=0., - re_mode='const', - re_count=1, - re_num_splits=0, - crop_pct=None, - tf_preprocessing=False, - separate=False): - - if isinstance(input_size, tuple): - img_size = input_size[-2:] - else: - img_size = input_size - - if tf_preprocessing and use_prefetcher: - 
assert not separate, "Separate transforms not supported for TF preprocessing" - from lib.dataset.processing import TfPreprocessTransform - transform = TfPreprocessTransform( - is_training=is_training, size=img_size, interpolation=interpolation) - else: - if is_training: - transform = transforms_imagenet_train( - img_size, - color_jitter=color_jitter, - auto_augment=auto_augment, - interpolation=interpolation, - use_prefetcher=use_prefetcher, - mean=mean, - std=std, - re_prob=re_prob, - re_mode=re_mode, - re_count=re_count, - re_num_splits=re_num_splits, - separate=separate) - else: - assert not separate, "Separate transforms not supported for validation preprocessing" - transform = transforms_imagenet_eval( - img_size, - interpolation=interpolation, - use_prefetcher=use_prefetcher, - mean=mean, - std=std, - crop_pct=crop_pct) - - return transform \ No newline at end of file diff --git a/examples/nas/cream/dataset/utils.py b/examples/nas/cream/dataset/utils.py deleted file mode 100755 index d1fc97519e..0000000000 --- a/examples/nas/cream/dataset/utils.py +++ /dev/null @@ -1,303 +0,0 @@ -import torch -import torchvision.transforms.functional as F -from PIL import Image -import warnings -import math -import random -import numpy as np - - -class ToNumpy: - - def __call__(self, pil_img): - np_img = np.array(pil_img, dtype=np.uint8) - if np_img.ndim < 3: - np_img = np.expand_dims(np_img, axis=-1) - np_img = np.rollaxis(np_img, 2) # HWC to CHW - return np_img - - -class ToTensor: - - def __init__(self, dtype=torch.float32): - self.dtype = dtype - - def __call__(self, pil_img): - np_img = np.array(pil_img, dtype=np.uint8) - if np_img.ndim < 3: - np_img = np.expand_dims(np_img, axis=-1) - np_img = np.rollaxis(np_img, 2) # HWC to CHW - return torch.from_numpy(np_img).to(dtype=self.dtype) - - -_pil_interpolation_to_str = { - Image.NEAREST: 'PIL.Image.NEAREST', - Image.BILINEAR: 'PIL.Image.BILINEAR', - Image.BICUBIC: 'PIL.Image.BICUBIC', - Image.LANCZOS: 'PIL.Image.LANCZOS', - Image.HAMMING: 'PIL.Image.HAMMING', - Image.BOX: 'PIL.Image.BOX', -} - - -def _pil_interp(method): - if method == 'bicubic': - return Image.BICUBIC - elif method == 'lanczos': - return Image.LANCZOS - elif method == 'hamming': - return Image.HAMMING - else: - # default bilinear, do we want to allow nearest? - return Image.BILINEAR - - -_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) - - -class RandomResizedCropAndInterpolation: - """Crop the given PIL Image to random size and aspect ratio with random interpolation. - A crop of random size (default: of 0.08 to 1.0) of the original size and a random - aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop - is finally resized to given size. - This is popularly used to train the Inception networks. - Args: - size: expected output size of each edge - scale: range of size of the origin size cropped - ratio: range of aspect ratio of the origin aspect ratio cropped - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.), - interpolation='bilinear'): - if isinstance(size, tuple): - self.size = size - else: - self.size = (size, size) - if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): - warnings.warn("range should be of kind (min, max)") - - if interpolation == 'random': - self.interpolation = _RANDOM_INTERPOLATION - else: - self.interpolation = _pil_interp(interpolation) - self.scale = scale - self.ratio = ratio - - @staticmethod - def get_params(img, scale, ratio): - """Get parameters for ``crop`` for a random sized crop. - Args: - img (PIL Image): Image to be cropped. - scale (tuple): range of size of the origin size cropped - ratio (tuple): range of aspect ratio of the origin aspect ratio cropped - Returns: - tuple: params (i, j, h, w) to be passed to ``crop`` for a random - sized crop. - """ - area = img.size[0] * img.size[1] - - for attempt in range(10): - target_area = random.uniform(*scale) * area - log_ratio = (math.log(ratio[0]), math.log(ratio[1])) - aspect_ratio = math.exp(random.uniform(*log_ratio)) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if w <= img.size[0] and h <= img.size[1]: - i = random.randint(0, img.size[1] - h) - j = random.randint(0, img.size[0] - w) - return i, j, h, w - - # Fallback to central crop - in_ratio = img.size[0] / img.size[1] - if in_ratio < min(ratio): - w = img.size[0] - h = int(round(w / min(ratio))) - elif in_ratio > max(ratio): - h = img.size[1] - w = int(round(h * max(ratio))) - else: # whole image - w = img.size[0] - h = img.size[1] - i = (img.size[1] - h) // 2 - j = (img.size[0] - w) // 2 - return i, j, h, w - - def __call__(self, img): - """ - Args: - img (PIL Image): Image to be cropped and resized. - Returns: - PIL Image: Randomly cropped and resized image. - """ - i, j, h, w = self.get_params(img, self.scale, self.ratio) - if isinstance(self.interpolation, (tuple, list)): - interpolation = random.choice(self.interpolation) - else: - interpolation = self.interpolation - return F.resized_crop(img, i, j, h, w, self.size, interpolation) - - def __repr__(self): - if isinstance(self.interpolation, (tuple, list)): - interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) - else: - interpolate_str = _pil_interpolation_to_str[self.interpolation] - format_string = self.__class__.__name__ + '(size={0}'.format(self.size) - format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) - format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) - format_string += ', interpolation={0})'.format(interpolate_str) - return format_string - - -def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): - # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() - # paths, flip the order so normal is run on CPU if this becomes a problem - # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 - if per_pixel: - return torch.empty(patch_size, dtype=dtype, device=device).normal_() - elif rand_color: - return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() - else: - return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) - -class RandomErasing: - """ Randomly selects a rectangle region in an image and erases its pixels. - 'Random Erasing Data Augmentation' by Zhong et al. 
- See https://arxiv.org/pdf/1708.04896.pdf - This variant of RandomErasing is intended to be applied to either a batch - or single image tensor after it has been normalized by dataset mean and std. - Args: - probability: Probability that the Random Erasing operation will be performed. - min_area: Minimum percentage of erased area wrt input image area. - max_area: Maximum percentage of erased area wrt input image area. - min_aspect: Minimum aspect ratio of erased area. - mode: pixel color mode, one of 'const', 'rand', or 'pixel' - 'const' - erase block is constant color of 0 for all channels - 'rand' - erase block is same per-channel random (normal) color - 'pixel' - erase block is per-pixel random (normal) color - max_count: maximum number of erasing blocks per image, area per box is scaled by count. - per-image count is randomly chosen between 1 and this value. - """ - - def __init__( - self, - probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, - mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): - self.probability = probability - self.min_area = min_area - self.max_area = max_area - max_aspect = max_aspect or 1 / min_aspect - self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) - self.min_count = min_count - self.max_count = max_count or min_count - self.num_splits = num_splits - mode = mode.lower() - self.rand_color = False - self.per_pixel = False - if mode == 'rand': - self.rand_color = True # per block random normal - elif mode == 'pixel': - self.per_pixel = True # per pixel random normal - else: - assert not mode or mode == 'const' - self.device = device - - def _erase(self, img, chan, img_h, img_w, dtype): - if random.random() > self.probability: - return - area = img_h * img_w - count = self.min_count if self.min_count == self.max_count else \ - random.randint(self.min_count, self.max_count) - for _ in range(count): - for attempt in range(10): - target_area = random.uniform(self.min_area, self.max_area) * area / count - aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) - h = int(round(math.sqrt(target_area * aspect_ratio))) - w = int(round(math.sqrt(target_area / aspect_ratio))) - if w < img_w and h < img_h: - top = random.randint(0, img_h - h) - left = random.randint(0, img_w - w) - img[:, top:top + h, left:left + w] = _get_pixels( - self.per_pixel, self.rand_color, (chan, h, w), - dtype=dtype, device=self.device) - break - - def __call__(self, input): - if len(input.size()) == 3: - self._erase(input, *input.size(), input.dtype) - else: - batch_size, chan, img_h, img_w = input.size() - # skip first slice of batch if num_splits is set (for clean portion of samples) - batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 - for i in range(batch_start, batch_size): - self._erase(input[i], chan, img_h, img_w, input.dtype) - return input - -def resolve_data_config(args, default_cfg={}, model=None, verbose=True): - DEFAULT_CROP_PCT = 0.875 - IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) - IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) - new_config = {} - default_cfg = default_cfg - if not default_cfg and model is not None and hasattr(model, 'default_cfg'): - default_cfg = model.default_cfg - - # Resolve input/image size - in_chans = 3 - if 'chans' in args and args['chans'] is not None: - in_chans = args['chans'] - - input_size = (in_chans, 224, 224) - if 'input_size' in args and args['input_size'] is not None: - assert isinstance(args['input_size'], (tuple, list)) - assert 
len(args['input_size']) == 3 - input_size = tuple(args['input_size']) - in_chans = input_size[0] # input_size overrides in_chans - elif 'img_size' in args and args['img_size'] is not None: - assert isinstance(args['img_size'], int) - input_size = (in_chans, args['img_size'], args['img_size']) - elif 'input_size' in default_cfg: - input_size = default_cfg['input_size'] - new_config['input_size'] = input_size - - # resolve interpolation method - new_config['interpolation'] = 'bicubic' - if 'interpolation' in args and args['interpolation']: - new_config['interpolation'] = args['interpolation'] - elif 'interpolation' in default_cfg: - new_config['interpolation'] = default_cfg['interpolation'] - - # resolve dataset + model mean for normalization - new_config['mean'] = IMAGENET_DEFAULT_MEAN - if 'mean' in args and args['mean'] is not None: - mean = tuple(args['mean']) - if len(mean) == 1: - mean = tuple(list(mean) * in_chans) - else: - assert len(mean) == in_chans - new_config['mean'] = mean - elif 'mean' in default_cfg: - new_config['mean'] = default_cfg['mean'] - - # resolve dataset + model std deviation for normalization - new_config['std'] = IMAGENET_DEFAULT_STD - if 'std' in args and args['std'] is not None: - std = tuple(args['std']) - if len(std) == 1: - std = tuple(list(std) * in_chans) - else: - assert len(std) == in_chans - new_config['std'] = std - elif 'std' in default_cfg: - new_config['std'] = default_cfg['std'] - - # resolve default crop percentage - new_config['crop_pct'] = DEFAULT_CROP_PCT - if 'crop_pct' in args and args['crop_pct'] is not None: - new_config['crop_pct'] = args['crop_pct'] - elif 'crop_pct' in default_cfg: - new_config['crop_pct'] = default_cfg['crop_pct'] - - return new_config \ No newline at end of file diff --git a/examples/nas/cream/utils/flops_table.py b/examples/nas/cream/flops_table.py similarity index 100% rename from examples/nas/cream/utils/flops_table.py rename to examples/nas/cream/flops_table.py diff --git a/examples/nas/cream/models/builder.py b/examples/nas/cream/models/builder.py index 013d5dc42b..477a59d143 100755 --- a/examples/nas/cream/models/builder.py +++ b/examples/nas/cream/models/builder.py @@ -6,9 +6,9 @@ from copy import deepcopy import torch.nn as nn -from models.utils import * -from models.units import * -from models.utils import _parse_ksize +from timm.models.layers import CondConv2d, get_condconv_initializer +from timm.models.layers.activations import HardSwish, Swish +from timm.models.efficientnet_blocks import * def _decode_block_str(block_str): """ Decode block definition string @@ -389,4 +389,4 @@ def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False): init_fn = init_fn or _init_weight_goog for n, m in model.named_modules(): - init_fn(m, n, last_bn=last_bn) \ No newline at end of file + init_fn(m, n, last_bn=last_bn) diff --git a/examples/nas/cream/models/hbuilder.py b/examples/nas/cream/models/hbuilder.py index a09c344ad7..3b6268cb81 100755 --- a/examples/nas/cream/models/hbuilder.py +++ b/examples/nas/cream/models/hbuilder.py @@ -6,9 +6,9 @@ from copy import deepcopy import torch.nn as nn -from models.utils import * -from models.units import * -from models.utils import _parse_ksize +from timm.models.layers import CondConv2d, get_condconv_initializer +from timm.models.layers.activations import HardSwish, Swish +from timm.models.efficientnet_blocks import * from nni.nas.pytorch import mutables diff --git a/examples/nas/cream/models/units.py b/examples/nas/cream/models/units.py deleted file mode 100755 
index 531fcc7f2d..0000000000 --- a/examples/nas/cream/models/units.py +++ /dev/null @@ -1,355 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np - -from functools import partial -from models.utils import * - -def swish(x, inplace: bool = False): - """Swish - Described in: https://arxiv.org/abs/1710.05941 - """ - return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) - -class Swish(nn.Module): - def __init__(self, inplace: bool = False): - super(Swish, self).__init__() - self.inplace = inplace - - def forward(self, x): - return swish(x, self.inplace) - -def sigmoid(x, inplace: bool = False): - return x.sigmoid_() if inplace else x.sigmoid() - -_SE_ARGS_DEFAULT = dict( - gate_fn=sigmoid, - act_layer=None, - reduce_mid=False, - divisor=1) - -def resolve_se_args(kwargs, in_chs, act_layer=None): - se_kwargs = kwargs.copy() if kwargs is not None else {} - # fill in args that aren't specified with the defaults - for k, v in _SE_ARGS_DEFAULT.items(): - se_kwargs.setdefault(k, v) - # some models, like MobilNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch - if not se_kwargs.pop('reduce_mid'): - se_kwargs['reduced_base_chs'] = in_chs - # act_layer override, if it remains None, the containing block's act_layer will be used - if se_kwargs['act_layer'] is None: - assert act_layer is not None - se_kwargs['act_layer'] = act_layer - return se_kwargs - -class Sigmoid(nn.Module): - def __init__(self, inplace: bool = False): - super(Sigmoid, self).__init__() - self.inplace = inplace - - def forward(self, x): - return x.sigmoid_() if self.inplace else x.sigmoid() - -class DepthwiseSeparableConv(nn.Module): - """ DepthwiseSeparable block - Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion - (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. - """ - def __init__(self, in_chs, out_chs, dw_kernel_size=3, - stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, - pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, - norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.): - super(DepthwiseSeparableConv, self).__init__() - norm_kwargs = norm_kwargs or {} - has_se = se_ratio is not None and se_ratio > 0. 
- self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip - self.has_pw_act = pw_act # activation after point-wise conv - self.drop_path_rate = drop_path_rate - - self.conv_dw = create_conv2d( - in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) - self.bn1 = norm_layer(in_chs, **norm_kwargs) - self.act1 = act_layer(inplace=True) - - # Squeeze-and-excitation - if has_se: - se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) - self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) - else: - self.se = None - - self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) - self.bn2 = norm_layer(out_chs, **norm_kwargs) - self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() - - def feature_info(self, location): - if location == 'expansion': - # no expansion in this block, use depthwise, before SE - info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) - elif location == 'depthwise': # after SE - info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) - else: # location == 'bottleneck' - info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) - return info - - def forward(self, x): - residual = x - - x = self.conv_dw(x) - x = self.bn1(x) - x = self.act1(x) - - if self.se is not None: - x = self.se(x) - - x = self.conv_pw(x) - x = self.bn2(x) - x = self.act2(x) - - if self.has_residual: - x += residual - return x - -class CondConv2d(nn.Module): - """ Conditionally Parameterized Convolution - Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py - Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: - https://github.com/pytorch/pytorch/issues/17983 - """ - __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] - - def __init__(self, in_channels, out_channels, kernel_size=3, - stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): - super(CondConv2d, self).__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = tup_pair(kernel_size) - self.stride = tup_pair(stride) - padding_val, is_padding_dynamic = get_padding_value( - padding, kernel_size, stride=stride, dilation=dilation) - self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript - self.padding = tup_pair(padding_val) - self.dilation = tup_pair(dilation) - self.groups = groups - self.num_experts = num_experts - - self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size - weight_num_param = 1 - for wd in self.weight_shape: - weight_num_param *= wd - self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) - - if bias: - self.bias_shape = (self.out_channels,) - self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - init_weight = get_condconv_initializer( - partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) - init_weight(self.weight) - if self.bias is not None: - fan_in = np.prod(self.weight_shape[1:]) - bound = 1 / math.sqrt(fan_in) - init_bias = get_condconv_initializer( - partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) - init_bias(self.bias) - - def forward(self, 
x, routing_weights): - B, C, H, W = x.shape - weight = torch.matmul(routing_weights, self.weight) - new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size - weight = weight.view(new_weight_shape) - bias = None - if self.bias is not None: - bias = torch.matmul(routing_weights, self.bias) - bias = bias.view(B * self.out_channels) - # move batch elements with channels so each batch element can be efficiently convolved with separate kernel - x = x.view(1, B * C, H, W) - if self.dynamic_padding: - out = conv2d_same( - x, weight, bias, stride=self.stride, padding=self.padding, - dilation=self.dilation, groups=self.groups * B) - else: - out = F.conv2d( - x, weight, bias, stride=self.stride, padding=self.padding, - dilation=self.dilation, groups=self.groups * B) - out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) - - # Literal port (from TF definition) - # x = torch.split(x, 1, 0) - # weight = torch.split(weight, 1, 0) - # if self.bias is not None: - # bias = torch.matmul(routing_weights, self.bias) - # bias = torch.split(bias, 1, 0) - # else: - # bias = [None] * B - # out = [] - # for xi, wi, bi in zip(x, weight, bias): - # wi = wi.view(*self.weight_shape) - # if bi is not None: - # bi = bi.view(*self.bias_shape) - # out.append(self.conv_fn( - # xi, wi, bi, stride=self.stride, padding=self.padding, - # dilation=self.dilation, groups=self.groups)) - # out = torch.cat(out, 0) - return out - -class SqueezeExcite(nn.Module): - def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, - act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_): - super(SqueezeExcite, self).__init__() - self.gate_fn = gate_fn - reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) - self.act1 = act_layer(inplace=True) - self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) - - def forward(self, x): - x_se = self.avg_pool(x) - x_se = self.conv_reduce(x_se) - x_se = self.act1(x_se) - x_se = self.conv_expand(x_se) - x = x * self.gate_fn(x_se) - return x - -class ConvBnAct(nn.Module): - def __init__(self, in_chs, out_chs, kernel_size, - stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, - norm_layer=nn.BatchNorm2d, norm_kwargs=None): - super(ConvBnAct, self).__init__() - norm_kwargs = norm_kwargs or {} - self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) - self.bn1 = norm_layer(out_chs, **norm_kwargs) - self.act1 = act_layer(inplace=True) - - def feature_info(self, location): - if location == 'expansion' or location == 'depthwise': - # no expansion or depthwise this block, use act after conv - info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) - else: # location == 'bottleneck' - info = dict(module='', hook_type='', num_chs=self.conv.out_channels) - return info - - def forward(self, x): - x = self.conv(x) - x = self.bn1(x) - x = self.act1(x) - return x - -class InvertedResidual(nn.Module): - """ Inverted residual block w/ optional SE and CondConv routing""" - - def __init__(self, in_chs, out_chs, dw_kernel_size=3, - stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, - exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, - se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, - conv_kwargs=None, drop_path_rate=0.): - super(InvertedResidual, self).__init__() - norm_kwargs = 
norm_kwargs or {} - conv_kwargs = conv_kwargs or {} - mid_chs = make_divisible(in_chs * exp_ratio) - has_se = se_ratio is not None and se_ratio > 0. - self.has_residual = (in_chs == out_chs and stride == 1) and not noskip - self.drop_path_rate = drop_path_rate - - # Point-wise expansion - self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) - self.bn1 = norm_layer(mid_chs, **norm_kwargs) - self.act1 = act_layer(inplace=True) - - # Depth-wise convolution - self.conv_dw = create_conv2d( - mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, - padding=pad_type, depthwise=True, **conv_kwargs) - self.bn2 = norm_layer(mid_chs, **norm_kwargs) - self.act2 = act_layer(inplace=True) - - # Squeeze-and-excitation - if has_se: - se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) - self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) - else: - self.se = None - - # Point-wise linear projection - self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) - self.bn3 = norm_layer(out_chs, **norm_kwargs) - - def feature_info(self, location): - if location == 'expansion': - info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) - elif location == 'depthwise': # after SE - info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) - else: # location == 'bottleneck' - info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) - return info - - def forward(self, x): - residual = x - - # Point-wise expansion - x = self.conv_pw(x) - x = self.bn1(x) - x = self.act1(x) - - # Depth-wise convolution - x = self.conv_dw(x) - x = self.bn2(x) - x = self.act2(x) - - # Squeeze-and-excitation - if self.se is not None: - x = self.se(x) - - # Point-wise linear projection - x = self.conv_pwl(x) - x = self.bn3(x) - - if self.has_residual: - x += residual - - return x - -def hard_sigmoid(x, inplace: bool = False): - if inplace: - return x.add_(3.).clamp_(0., 6.).div_(6.) - else: - return F.relu6(x + 3.) / 6. 
- - -class HardSigmoid(nn.Module): - def __init__(self, inplace: bool = False): - super(HardSigmoid, self).__init__() - self.inplace = inplace - - def forward(self, x): - return hard_sigmoid(x, self.inplace) - -class SelectAdaptivePool2d(nn.Module): - """Selectable global pooling layer with dynamic input kernel size - """ - def __init__(self, output_size=1, pool_type='avg', flatten=False): - super(SelectAdaptivePool2d, self).__init__() - self.output_size = output_size - self.pool_type = pool_type - self.flatten = flatten - self.pool = nn.AdaptiveAvgPool2d(output_size) - - def forward(self, x): - x = self.pool(x) - if self.flatten: - x = x.flatten(1) - return x - - def feat_mult(self): - return 1 - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + 'output_size=' + str(self.output_size) \ - + ', pool_type=' + self.pool_type + ')' \ No newline at end of file diff --git a/examples/nas/cream/models/utils.py b/examples/nas/cream/models/utils.py deleted file mode 100755 index 0b7ad2c1d0..0000000000 --- a/examples/nas/cream/models/utils.py +++ /dev/null @@ -1,123 +0,0 @@ -import math -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np - -from typing import Tuple, Optional, List -from torch._six import container_abcs -from itertools import repeat - -def _ntuple(n): - def parse(x): - if isinstance(x, container_abcs.Iterable): - return x - return tuple(repeat(x, n)) - return -tup_pair = _ntuple(2) - -def get_same_padding(x: int, k: int, s: int, d: int): - return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) - -def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): - ih, iw = x.size()[-2:] - pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) - if pad_h > 0 or pad_w > 0: - x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) - return x - -def conv2d_same( - x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), - padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): - x = pad_same(x, weight.shape[-2:], stride, dilation) - return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) - -def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: - padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 - return padding - -def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): - return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 - -def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: - dynamic = False - if isinstance(padding, str): - # for any string padding, the padding will be calculated for you, one of three ways - padding = padding.lower() - if padding == 'same': - # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact - if is_static_pad(kernel_size, **kwargs): - # static case, no extra overhead - padding = get_padding(kernel_size, **kwargs) - else: - # dynamic 'SAME' padding, has runtime/GPU memory overhead - padding = 0 - dynamic = True - elif padding == 'valid': - # 'VALID' padding, same as padding=0 - padding = 0 - else: - # Default to PyTorch style 'same'-ish symmetric padding - padding = get_padding(kernel_size, **kwargs) - return padding, dynamic - -def get_condconv_initializer(initializer, num_experts, expert_shape): - def condconv_initializer(weight): - """CondConv initializer function.""" - num_params = 
np.prod(expert_shape) - if (len(weight.shape) != 2 or weight.shape[0] != num_experts or - weight.shape[1] != num_params): - raise (ValueError( - 'CondConv variables must have shape [num_experts, num_params]')) - for i in range(num_experts): - initializer(weight[i].view(expert_shape)) - return condconv_initializer - -def make_divisible(v, divisor=8, min_value=None): - min_value = min_value or divisor - new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than 10%. - if new_v < 0.9 * v: - new_v += divisor - return new_v - -def _parse_ksize(ss): - if ss.isdigit(): - return int(ss) - else: - return [int(k) for k in ss.split('.')] - -def resolve_bn_args(kwargs): - bn_args = {} - bn_momentum = kwargs.pop('bn_momentum', None) - if bn_momentum is not None: - bn_args['momentum'] = bn_momentum - bn_eps = kwargs.pop('bn_eps', None) - if bn_eps is not None: - bn_args['eps'] = bn_eps - return bn_args - -def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): - """Round number of filters based on depth multiplier.""" - if not multiplier: - return channels - channels *= multiplier - return make_divisible(channels, divisor, channel_min) - -def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): - padding = kwargs.pop('padding', '') - kwargs.setdefault('bias', False) - padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) - return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) - -def create_conv2d(in_chs, out_chs, kernel_size, **kwargs): - """ Select a 2d convolution implementation based on arguments - Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. - Used extensively by EfficientNet, MobileNetv3 and related networks. - """ - assert 'groups' not in kwargs # only use 'depthwise' bool arg - depthwise = kwargs.pop('depthwise', False) - groups = out_chs if depthwise else 1 - m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) - return m - diff --git a/examples/nas/cream/utils/EMA.py b/examples/nas/cream/utils/EMA.py deleted file mode 100755 index e3c47a1fea..0000000000 --- a/examples/nas/cream/utils/EMA.py +++ /dev/null @@ -1,66 +0,0 @@ -import torch -import logging - -from copy import deepcopy -from collections import OrderedDict - -class ModelEma: - """ Model Exponential Moving Average - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use - RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA - smoothing of weights to match results. Pay attention to the decay constant you are using - relative to your update count per epoch. - To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but - disable validation of the EMA weights. Validation will have to be done manually in a separate - process, or after the training stops converging. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. - I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU. 
- """ - def __init__(self, model, decay=0.9999, device='', resume=''): - # make a copy of the model for accumulating moving average of weights - self.ema = deepcopy(model) - self.ema.eval() - self.decay = decay - self.device = device # perform ema on different device from model if set - if device: - self.ema.to(device=device) - self.ema_has_module = hasattr(self.ema, 'module') - if resume: - self._load_checkpoint(resume) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def _load_checkpoint(self, checkpoint_path): - checkpoint = torch.load(checkpoint_path, map_location='cpu') - assert isinstance(checkpoint, dict) - if 'state_dict_ema' in checkpoint: - new_state_dict = OrderedDict() - for k, v in checkpoint['state_dict_ema'].items(): - # ema model may have been wrapped by DataParallel, and need module prefix - if self.ema_has_module: - name = 'module.' + k if not k.startswith('module') else k - else: - name = k - new_state_dict[name] = v - self.ema.load_state_dict(new_state_dict) - logging.info("Loaded state_dict_ema") - else: - logging.warning("Failed to find state_dict_ema, starting from loaded model weights") - - def update(self, model): - # correct a mismatch in state dict keys - needs_module = hasattr(model, 'module') and not self.ema_has_module - with torch.no_grad(): - msd = model.state_dict() - for k, ema_v in self.ema.state_dict().items(): - if needs_module: - k = 'module.' + k - model_v = msd[k].detach() - if self.device: - model_v = model_v.to(device=self.device) - ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) \ No newline at end of file diff --git a/examples/nas/cream/utils/__init__.py b/examples/nas/cream/utils/__init__.py deleted file mode 100755 index e69de29bb2..0000000000 diff --git a/examples/nas/cream/utils/helpers.py b/examples/nas/cream/utils/helpers.py deleted file mode 100755 index dc42f65318..0000000000 --- a/examples/nas/cream/utils/helpers.py +++ /dev/null @@ -1,169 +0,0 @@ -import os -import csv -import torch -import logging -import logging.handlers - -from collections import OrderedDict -from torch import distributed as dist - -from utils.saver import unwrap_model - -def get_logger(file_path, time=True): - """ Make python logger """ - logger = logging.getLogger("train") - if time: - log_format = '%(asctime)s | %(message)s' - else: - log_format = '%(message)s' - formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') - file_handler = logging.FileHandler(file_path) - file_handler.setFormatter(formatter) - stream_handler = logging.StreamHandler() - stream_handler.setFormatter(formatter) - - logger.addHandler(file_handler) - logger.addHandler(stream_handler) - logger.setLevel(logging.INFO) - logger.propagate = False - - return logger - -def load_state_dict(checkpoint_path, use_ema=False): - if checkpoint_path and os.path.isfile(checkpoint_path): - checkpoint = torch.load(checkpoint_path, map_location='cpu') - state_dict_key = 'state_dict' - if isinstance(checkpoint, dict): - if use_ema and 'state_dict_ema' in checkpoint: - state_dict_key = 'state_dict_ema' - if state_dict_key and state_dict_key in checkpoint: - new_state_dict = OrderedDict() - for k, v in checkpoint[state_dict_key].items(): - # strip `module.` prefix - name = k[7:] if k.startswith('module') else k - new_state_dict[name] = v - state_dict = new_state_dict - else: - state_dict = checkpoint - logging.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) - return state_dict - else: - logging.error("No checkpoint found at 
'{}'".format(checkpoint_path)) - raise FileNotFoundError() - -def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): - state_dict = load_state_dict(checkpoint_path, use_ema) - model.load_state_dict(state_dict, strict=strict) - -def resume_checkpoint(model, checkpoint_path): - other_state = {} - resume_epoch = None - if os.path.isfile(checkpoint_path): - checkpoint = torch.load(checkpoint_path, map_location='cpu') - if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: - new_state_dict = OrderedDict() - for k, v in checkpoint['state_dict'].items(): - name = k[7:] if k.startswith('module') else k - new_state_dict[name] = v - model.load_state_dict(new_state_dict) - if 'optimizer' in checkpoint: - other_state['optimizer'] = checkpoint['optimizer'] - if 'amp' in checkpoint: - other_state['amp'] = checkpoint['amp'] - if 'epoch' in checkpoint: - resume_epoch = checkpoint['epoch'] - if 'version' in checkpoint and checkpoint['version'] > 1: - resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save - logging.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) - else: - model.load_state_dict(checkpoint) - logging.info("Loaded checkpoint '{}'".format(checkpoint_path)) - return other_state, resume_epoch - else: - logging.error("No checkpoint found at '{}'".format(checkpoint_path)) - raise FileNotFoundError() - -class AverageMeter: - """Computes and stores the average and current value""" - def __init__(self): - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count - -def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - maxk = max(topk) - batch_size = target.size(0) - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - return [correct[:k].view(-1).float().sum(0) * 100. 
/ batch_size for k in topk] - -def get_outdir(path, *paths, inc=False): - outdir = os.path.join(path, *paths) - if not os.path.exists(outdir): - os.makedirs(outdir) - elif inc: - count = 1 - outdir_inc = outdir + '-' + str(count) - while os.path.exists(outdir_inc): - count = count + 1 - outdir_inc = outdir + '-' + str(count) - assert count < 100 - outdir = outdir_inc - os.makedirs(outdir) - return outdir - -def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False): - rowd = OrderedDict(epoch=epoch) - rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) - rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) - with open(filename, mode='a') as cf: - dw = csv.DictWriter(cf, fieldnames=rowd.keys()) - if write_header: # first iteration (epoch == 1 can't be used) - dw.writeheader() - dw.writerow(rowd) - -def reduce_tensor(tensor, n): - rt = tensor.clone() - dist.all_reduce(rt, op=dist.ReduceOp.SUM) - rt /= n - return rt - -def distribute_bn(model, world_size, reduce=False): - # ensure every node has the same running bn stats - for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): - if ('running_mean' in bn_name) or ('running_var' in bn_name): - if reduce: - # average bn stats across whole group - torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) - bn_buf /= float(world_size) - else: - # broadcast bn stats from rank 0 to whole group - torch.distributed.broadcast(bn_buf, 0) - -class FormatterNoInfo(logging.Formatter): - def __init__(self, fmt='%(levelname)s: %(message)s'): - logging.Formatter.__init__(self, fmt) - - def format(self, record): - if record.levelno == logging.INFO: - return str(record.getMessage()) - return logging.Formatter.format(self, record) - - -def setup_default_logging(default_level=logging.INFO): - console_handler = logging.StreamHandler() - console_handler.setFormatter(FormatterNoInfo()) - logging.root.addHandler(console_handler) - logging.root.setLevel(default_level) \ No newline at end of file diff --git a/examples/nas/cream/utils/loss.py b/examples/nas/cream/utils/loss.py deleted file mode 100755 index cc4586ebe4..0000000000 --- a/examples/nas/cream/utils/loss.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def cross_entropy_loss_with_soft_target(pred, soft_target): - logsoftmax = nn.LogSoftmax() - return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) - - -class LabelSmoothingCrossEntropy(nn.Module): - """ - NLL loss with label smoothing. - """ - def __init__(self, smoothing=0.1): - """ - Constructor for the LabelSmoothing module. - :param smoothing: label smoothing factor - """ - super(LabelSmoothingCrossEntropy, self).__init__() - assert smoothing < 1.0 - self.smoothing = smoothing - self.confidence = 1. 
- smoothing - - def forward(self, x, target): - logprobs = F.log_softmax(x, dim=-1) - nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) - nll_loss = nll_loss.squeeze(1) - smooth_loss = -logprobs.mean(dim=-1) - loss = self.confidence * nll_loss + self.smoothing * smooth_loss - return loss.mean() \ No newline at end of file diff --git a/examples/nas/cream/utils/optimizer.py b/examples/nas/cream/utils/optimizer.py deleted file mode 100755 index ed49f48d1b..0000000000 --- a/examples/nas/cream/utils/optimizer.py +++ /dev/null @@ -1,162 +0,0 @@ -import torch -from torch import optim as optim -from torch.optim import Optimizer - -class RMSpropTF(Optimizer): - """Implements RMSprop algorithm (TensorFlow style epsilon) - NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt - to closer match Tensorflow for matching hyper-params. - Proposed by G. Hinton in his - `course `_. - The centered version first appears in `Generating Sequences - With Recurrent Neural Networks `_. - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-2) - momentum (float, optional): momentum factor (default: 0) - alpha (float, optional): smoothing (decay) constant (default: 0.9) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-10) - centered (bool, optional) : if ``True``, compute the centered RMSProp, - the gradient is normalized by an estimation of its variance - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 - lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer - update as per defaults in Tensorflow - """ - - def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, - decoupled_decay=False, lr_in_momentum=True): - if not 0.0 <= lr: - raise ValueError("Invalid learning rate: {}".format(lr)) - if not 0.0 <= eps: - raise ValueError("Invalid epsilon value: {}".format(eps)) - if not 0.0 <= momentum: - raise ValueError("Invalid momentum value: {}".format(momentum)) - if not 0.0 <= weight_decay: - raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) - if not 0.0 <= alpha: - raise ValueError("Invalid alpha value: {}".format(alpha)) - - defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, - decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) - super(RMSpropTF, self).__init__(params, defaults) - - def __setstate__(self, state): - super(RMSpropTF, self).__setstate__(state) - for group in self.param_groups: - group.setdefault('momentum', 0) - group.setdefault('centered', False) - - def step(self, closure=None): - """Performs a single optimization step. - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. 
- """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - if grad.is_sparse: - raise RuntimeError('RMSprop does not support sparse gradients') - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero - if group['momentum'] > 0: - state['momentum_buffer'] = torch.zeros_like(p.data) - if group['centered']: - state['grad_avg'] = torch.zeros_like(p.data) - - square_avg = state['square_avg'] - one_minus_alpha = 1. - group['alpha'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - if 'decoupled_decay' in group and group['decoupled_decay']: - p.data.add_(-group['weight_decay'], p.data) - else: - grad = grad.add(group['weight_decay'], p.data) - - # Tensorflow order of ops for updating squared avg - square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg) - # square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original - - if group['centered']: - grad_avg = state['grad_avg'] - grad_avg.add_(one_minus_alpha, grad - grad_avg) - # grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original - avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt - else: - avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt - - if group['momentum'] > 0: - buf = state['momentum_buffer'] - # Tensorflow accumulates the LR scaling in the momentum buffer - if 'lr_in_momentum' in group and group['lr_in_momentum']: - buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg) - p.data.add_(-buf) - else: - # PyTorch scales the param update by LR - buf.mul_(group['momentum']).addcdiv_(grad, avg) - p.data.add_(-group['lr'], buf) - else: - p.data.addcdiv_(-group['lr'], grad, avg) - - return loss - - -def add_weight_decay(model, weight_decay=1e-5, skip_list=()): - decay = [] - no_decay = [] - for name, param in model.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: - no_decay.append(param) - else: - decay.append(param) - return [ - {'params': no_decay, 'weight_decay': 0.}, - {'params': decay, 'weight_decay': weight_decay}] - - -def create_optimizer(args, model, filter_bias_and_bn=True): - opt_lower = args.opt.lower() - weight_decay = args.weight_decay - if weight_decay and filter_bias_and_bn: - parameters = add_weight_decay(model, weight_decay) - weight_decay = 0. 
- else: - parameters = model.parameters() - - opt_split = opt_lower.split('_') - opt_lower = opt_split[-1] - if opt_lower == 'sgd' or opt_lower == 'nesterov': - optimizer = optim.SGD( - parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) - elif opt_lower == 'momentum': - optimizer = optim.SGD( - parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) - elif opt_lower == 'adam': - optimizer = optim.Adam( - parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps) - elif opt_lower == 'rmsproptf': - optimizer = RMSpropTF( - parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps, - momentum=args.momentum, weight_decay=weight_decay) - else: - assert False and "Invalid optimizer" - raise ValueError - - return optimizer \ No newline at end of file diff --git a/examples/nas/cream/utils/saver.py b/examples/nas/cream/utils/saver.py deleted file mode 100755 index 2ad3d4659b..0000000000 --- a/examples/nas/cream/utils/saver.py +++ /dev/null @@ -1,140 +0,0 @@ -import torch -import os -import glob -import operator -import logging -import shutil - -from utils.EMA import ModelEma - -def unwrap_model(model): - if isinstance(model, ModelEma): - return unwrap_model(model.ema) - else: - return model.module if hasattr(model, 'module') else model - -def get_state_dict(model): - return unwrap_model(model).state_dict() - -class CheckpointSaver: - def __init__( - self, - checkpoint_prefix='checkpoint', - recovery_prefix='recovery', - checkpoint_dir='', - recovery_dir='', - decreasing=False, - max_history=10): - - # state - self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness - self.best_epoch = None - self.best_metric = None - self.curr_recovery_file = '' - self.last_recovery_file = '' - - # config - self.checkpoint_dir = checkpoint_dir - self.recovery_dir = recovery_dir - self.save_prefix = checkpoint_prefix - self.recovery_prefix = recovery_prefix - self.extension = '.pth.tar' - self.decreasing = decreasing # a lower metric is better if True - self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs - self.max_history = max_history - assert self.max_history >= 1 - - def save_checkpoint(self, model, optimizer, args, epoch, model_ema=None, metric=None, use_amp=False): - assert epoch >= 0 - tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) - last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) - self._save(tmp_save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) - if os.path.exists(last_save_path): - os.remove(last_save_path) - #os.unlink(last_save_path)# required for Windows support. 
- os.rename(tmp_save_path, last_save_path) - worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None - if (len(self.checkpoint_files) < self.max_history - or metric is None or self.cmp(metric, worst_file[1])): - if len(self.checkpoint_files) >= self.max_history: - self._cleanup_checkpoints(1) - filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension - save_path = os.path.join(self.checkpoint_dir, filename) - self._save(save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) - # os.link(last_save_path, save_path) - self.checkpoint_files.append((save_path, metric)) - self.checkpoint_files = sorted( - self.checkpoint_files, key=lambda x: x[1], - reverse=not self.decreasing) # sort in descending order if a lower metric is not better - - checkpoints_str = "Current checkpoints:\n" - for c in self.checkpoint_files: - checkpoints_str += ' {}\n'.format(c) - logging.info(checkpoints_str) - - if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): - self.best_epoch = epoch - self.best_metric = metric - best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) - - # if os.path.exists(best_save_path): - # os.unlink(best_save_path) - # os.link(last_save_path, best_save_path) - if os.path.exists(best_save_path): - os.remove(best_save_path) - self._save(best_save_path, model, optimizer, args, epoch, model_ema, metric, use_amp) - - - return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) - - def _save(self, save_path, model, optimizer, args, epoch, model_ema=None, metric=None, use_amp=False): - save_state = { - 'epoch': epoch, - 'arch': args.model, - 'state_dict': get_state_dict(model), - 'optimizer': optimizer.state_dict(), - 'args': args, - 'version': 2, # version < 2 increments epoch before save - } - if model_ema is not None: - save_state['state_dict_ema'] = get_state_dict(model_ema) - if metric is not None: - save_state['metric'] = metric - torch.save(save_state, save_path) - - def _cleanup_checkpoints(self, trim=0): - trim = min(len(self.checkpoint_files), trim) - delete_index = self.max_history - trim - if delete_index <= 0 or len(self.checkpoint_files) <= delete_index: - return - to_delete = self.checkpoint_files[delete_index:] - for d in to_delete: - try: - logging.debug("Cleaning checkpoint: {}".format(d)) - os.remove(d[0]) - except Exception as e: - logging.error("Exception '{}' while deleting checkpoint".format(e)) - self.checkpoint_files = self.checkpoint_files[:delete_index] - - def save_recovery(self, model, optimizer, args, epoch, model_ema=None, use_amp=False, batch_idx=0): - assert epoch >= 0 - filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension - save_path = os.path.join(self.recovery_dir, filename) - self._save(save_path, model, optimizer, args, epoch, model_ema, use_amp=use_amp) - if os.path.exists(self.last_recovery_file): - try: - logging.debug("Cleaning recovery: {}".format(self.last_recovery_file)) - os.remove(self.last_recovery_file) - except Exception as e: - logging.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) - self.last_recovery_file = self.curr_recovery_file - self.curr_recovery_file = save_path - - def find_recovery(self): - recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) - files = glob.glob(recovery_path + '*' + self.extension) - files = sorted(files) - if len(files): - return files[0] - else: - return '' \ No newline at end of file diff --git 
a/examples/nas/cream/utils/scheduler.py b/examples/nas/cream/utils/scheduler.py deleted file mode 100755 index 172f20455a..0000000000 --- a/examples/nas/cream/utils/scheduler.py +++ /dev/null @@ -1,309 +0,0 @@ -import logging -import math -import numpy as np -import torch - -from typing import Dict, Any - -class Scheduler: - """ Parameter Scheduler Base Class - A scheduler base class that can be used to schedule any optimizer parameter groups. - Unlike the builtin PyTorch schedulers, this is intended to be consistently called - * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value - * At the END of each optimizer update, after incrementing the update count, to calculate next update's value - The schedulers built on this should try to remain as stateless as possible (for simplicity). - This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' - and -1 values for special behaviour. All epoch and update counts must be tracked in the training - code and explicitly passed in to the schedulers on the corresponding step or step_update call. - Based on ideas from: - * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler - * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers - """ - - def __init__(self, - optimizer: torch.optim.Optimizer, - param_group_field: str, - noise_range_t=None, - noise_type='normal', - noise_pct=0.67, - noise_std=1.0, - noise_seed=None, - initialize: bool = True) -> None: - self.optimizer = optimizer - self.param_group_field = param_group_field - self._initial_param_group_field = "initial_" + param_group_field - if initialize: - for i, group in enumerate(self.optimizer.param_groups): - if param_group_field not in group: - raise KeyError(f"{param_group_field} missing from param_groups[{i}]") - group.setdefault(self._initial_param_group_field, group[param_group_field]) - else: - for i, group in enumerate(self.optimizer.param_groups): - if self._initial_param_group_field not in group: - raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") - self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] - self.metric = None # any point to having this for all? 
- self.noise_range_t = noise_range_t - self.noise_pct = noise_pct - self.noise_type = noise_type - self.noise_std = noise_std - self.noise_seed = noise_seed if noise_seed is not None else 42 - self.update_groups(self.base_values) - - def state_dict(self) -> Dict[str, Any]: - return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} - - def load_state_dict(self, state_dict: Dict[str, Any]) -> None: - self.__dict__.update(state_dict) - - def get_epoch_values(self, epoch: int): - return None - - def get_update_values(self, num_updates: int): - return None - - def step(self, epoch: int, metric: float = None) -> None: - self.metric = metric - values = self.get_epoch_values(epoch) - if values is not None: - values = self._add_noise(values, epoch) - self.update_groups(values) - - def step_update(self, num_updates: int, metric: float = None): - self.metric = metric - values = self.get_update_values(num_updates) - if values is not None: - values = self._add_noise(values, num_updates) - self.update_groups(values) - - def update_groups(self, values): - if not isinstance(values, (list, tuple)): - values = [values] * len(self.optimizer.param_groups) - for param_group, value in zip(self.optimizer.param_groups, values): - param_group[self.param_group_field] = value - - def _add_noise(self, lrs, t): - if self.noise_range_t is not None: - if isinstance(self.noise_range_t, (list, tuple)): - apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] - else: - apply_noise = t >= self.noise_range_t - if apply_noise: - g = torch.Generator() - g.manual_seed(self.noise_seed + t) - if self.noise_type == 'normal': - while True: - # resample if noise out of percent limit, brute force but shouldn't spin much - noise = torch.randn(1, generator=g).item() - if abs(noise) < self.noise_pct: - break - else: - noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct - lrs = [v + v * noise for v in lrs] - return lrs - -logger = logging.getLogger(__name__) - -class CosineLRScheduler(Scheduler): - """ - Cosine decay with restarts. - This is described in the paper https://arxiv.org/abs/1608.03983. 
- Inspiration from - https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py - """ - - def __init__(self, - optimizer: torch.optim.Optimizer, - t_initial: int, - t_mul: float = 1., - lr_min: float = 0., - decay_rate: float = 1., - warmup_t=0, - warmup_lr_init=0, - warmup_prefix=False, - cycle_limit=0, - t_in_epochs=True, - noise_range_t=None, - noise_pct=0.67, - noise_std=1.0, - noise_seed=42, - initialize=True) -> None: - super().__init__( - optimizer, param_group_field="lr", - noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, - initialize=initialize) - - assert t_initial > 0 - assert lr_min >= 0 - if t_initial == 1 and t_mul == 1 and decay_rate == 1: - logger.warning("Cosine annealing scheduler will have no effect on the learning " - "rate since t_initial = t_mul = eta_mul = 1.") - self.t_initial = t_initial - self.t_mul = t_mul - self.lr_min = lr_min - self.decay_rate = decay_rate - self.cycle_limit = cycle_limit - self.warmup_t = warmup_t - self.warmup_lr_init = warmup_lr_init - self.warmup_prefix = warmup_prefix - self.t_in_epochs = t_in_epochs - if self.warmup_t: - self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] - super().update_groups(self.warmup_lr_init) - else: - self.warmup_steps = [1 for _ in self.base_values] - - def _get_lr(self, t): - if t < self.warmup_t: - lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] - else: - if self.warmup_prefix: - t = t - self.warmup_t - - if self.t_mul != 1: - i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul)) - t_i = self.t_mul ** i * self.t_initial - t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial - else: - i = t // self.t_initial - t_i = self.t_initial - t_curr = t - (self.t_initial * i) - - gamma = self.decay_rate ** i - lr_min = self.lr_min * gamma - lr_max_values = [v * gamma for v in self.base_values] - - if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit): - lrs = [ - lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values - ] - else: - lrs = [self.lr_min for _ in self.base_values] - - return lrs - - def get_epoch_values(self, epoch: int): - if self.t_in_epochs: - return self._get_lr(epoch) - else: - return None - - def get_update_values(self, num_updates: int): - if not self.t_in_epochs: - return self._get_lr(num_updates) - else: - return None - - def get_cycle_length(self, cycles=0): - if not cycles: - cycles = self.cycle_limit - assert cycles > 0 - if self.t_mul == 1.0: - return self.t_initial * cycles - else: - return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul))) - -class StepLRScheduler(Scheduler): - """ - """ - - def __init__(self, - optimizer: torch.optim.Optimizer, - decay_t: float, - decay_rate: float = 1., - warmup_t=0, - warmup_lr_init=0, - t_in_epochs=True, - noise_range_t=None, - noise_pct=0.67, - noise_std=1.0, - noise_seed=42, - initialize=True, - ) -> None: - super().__init__( - optimizer, param_group_field="lr", - noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, - initialize=initialize) - - self.decay_t = decay_t - self.decay_rate = decay_rate - self.warmup_t = warmup_t - self.warmup_lr_init = warmup_lr_init - self.t_in_epochs = t_in_epochs - if self.warmup_t: - self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] - 
-            super().update_groups(self.warmup_lr_init)
-        else:
-            self.warmup_steps = [1 for _ in self.base_values]
-
-    def _get_lr(self, t):
-        if t < self.warmup_t:
-            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
-        else:
-            lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values]
-        return lrs
-
-    def get_epoch_values(self, epoch: int):
-        if self.t_in_epochs:
-            return self._get_lr(epoch)
-        else:
-            return None
-
-    def get_update_values(self, num_updates: int):
-        if not self.t_in_epochs:
-            return self._get_lr(num_updates)
-        else:
-            return None
-
-def create_scheduler(args, optimizer):
-    num_epochs = args.epochs
-
-    if args.lr_noise is not None:
-        if isinstance(args.lr_noise, (list, tuple)):
-            noise_range = [n * num_epochs for n in args.lr_noise]
-            if len(noise_range) == 1:
-                noise_range = noise_range[0]
-        else:
-            noise_range = args.lr_noise * num_epochs
-    else:
-        noise_range = None
-
-    lr_scheduler = None
-    #FIXME expose cycle parms of the scheduler config to arguments
-    if args.sched == 'cosine':
-        lr_scheduler = CosineLRScheduler(
-            optimizer,
-            t_initial=num_epochs,
-            t_mul=1.0,
-            lr_min=args.min_lr,
-            decay_rate=args.decay_rate,
-            warmup_lr_init=args.warmup_lr,
-            warmup_t=args.warmup_epochs,
-            cycle_limit=1,
-            t_in_epochs=True,
-            noise_range_t=noise_range,
-            noise_pct=args.lr_noise_pct,
-            noise_std=args.lr_noise_std,
-            noise_seed=args.seed,
-        )
-        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
-    elif args.sched == 'step':
-        lr_scheduler = StepLRScheduler(
-            optimizer,
-            decay_t=args.decay_epochs,
-            decay_rate=args.decay_rate,
-            warmup_lr_init=args.warmup_lr,
-            warmup_t=args.warmup_epochs,
-            noise_range_t=noise_range,
-            noise_pct=args.lr_noise_pct,
-            noise_std=args.lr_noise_std,
-            noise_seed=args.seed,
-        )
-    elif args.sched == 'spos_linear':
-        ITERS = args.epochs * 1251
-        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
-                                                         lambda step: (
-                                                             1.0 - step / ITERS) if step <= ITERS else 0,
-                                                         last_epoch=-1)
-
-    return lr_scheduler, num_epochs
\ No newline at end of file

From 25596972b9c5ddb1857f13820ffffd6eb3d6ff51 Mon Sep 17 00:00:00 2001
From: Houwen Peng <49014385+penghouwen@users.noreply.github.com>
Date: Thu, 22 Oct 2020 11:23:50 +0800
Subject: [PATCH 35/62] Delete cream.jpg

---
 docs/img/cream.jpg | Bin 98959 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 docs/img/cream.jpg

diff --git a/docs/img/cream.jpg b/docs/img/cream.jpg
deleted file mode 100644
index 715472b1deaf95a771399649c2efdb9bf0821bcc..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data for docs/img/cream.jpg (98959 bytes) omitted]
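For orientation, the create_scheduler() factory shown as removed lines in the scheduler.py hunk above is driven entirely by an argparse-style namespace and returns a (lr_scheduler, num_epochs) pair. The sketch below is a minimal, hypothetical usage example and is not part of any patch in this series: the argument values, the toy model, the SGD optimizer, and the import path (examples/nas/cream/utils/scheduler.py, as it existed before the later refactoring) are all illustrative assumptions.

    import argparse
    import torch

    # Assumed import path; the module lives under examples/nas/cream/utils/ in the original example tree.
    from utils.scheduler import create_scheduler

    # Hypothetical values for the fields that create_scheduler() reads from `args`.
    args = argparse.Namespace(
        epochs=120, sched='cosine', min_lr=1e-5, decay_rate=0.1,
        warmup_lr=1e-4, warmup_epochs=3, cooldown_epochs=10,
        decay_epochs=30, lr_noise=None, lr_noise_pct=0.67,
        lr_noise_std=1.0, seed=42)

    model = torch.nn.Linear(8, 8)  # stand-in for the supernet
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

    # With sched='cosine', num_epochs is extended to cycle length + cooldown_epochs.
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)

    for epoch in range(num_epochs):
        # ... run one training epoch ...
        lr_scheduler.step(epoch + 1)  # epoch-based schedulers (t_in_epochs=True) update here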
From e48d293e6201ef9dd22910143cbe9d1d92bc32f2 Mon Sep 17 00:00:00 2001
From: Houwen Peng <49014385+penghouwen@users.noreply.github.com>
Date: Thu, 22 Oct 2020 11:24:21 +0800
Subject: [PATCH 36/62] Add files via upload

---
 docs/img/cream.png | Bin 0 -> 65120 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/img/cream.png

diff --git a/docs/img/cream.png b/docs/img/cream.png
new file mode 100644
index 0000000000000000000000000000000000000000..99a24840a75f9f4b3c20006a537ea0792a670311
GIT binary patch
literal 65120
[65120 bytes of base85-encoded PNG data for docs/img/cream.png omitted]
zgC3d$`cgv_tVc!5KsL`fcw*lB=^$kLhco;&|0*eGP5aGO`h-(Z26tpC zST=j59>g??gg04Kb4dpV4kv~TC{~5=x1LS~R^P%<4z8{TEgJU2V#kCSOMqCj={d+T z8UAl zd3Y%AiB^Cw`?3`sOx(+$=cFY;+=YTA(|jOWZiNzLh2B+xlbsD8oUW8s5xz*gkJ$d*eNK6w$ni23-)(ty)nr;8r9!mkORhyK<}d6S z9^I4Ahx3IV!r1E`(c+5xyBZ0yu(>)TFewdU^Qgk=kZ*`5S6>P#swkzOJ4{v_u|@;P z)JUgBFb|8~?Jq_{DJQVm$lS2F%zv8KVaNP>{(OW_e*khAsmlziheoS0ayyWl*hCaP zT(8GWLX1 z3_I1oPEO^v8Xh3@pSIbbspJ}l#JQF5{6&*oAu6}tS@pVDr#U0m+jlidnrIRRlf%kp zdx4u3-Cb19LqCZ_iMBi_))||80Tjzs21(rx!62b)qKbF`xF};&@5GDpEQs3Bcu2|o z$LkZ*7IyN04tONYf@`6aMH=L%Ff4%W2b?$W_{Z2=;r&&@JQEnPt2YJSAdPtcc> zji6kHzkc-aC09J^f{S?mGKBpWe~|v~Z~s#ifnw~rd;8yC6F{8>vVRVAb1s&e=cfmo zDh4Bh^AkK4IaJg2-#rfIBm(rZ{~n|3{eLg}zc2Q`*NpK0cWG!)-TK0=*j+i@Sr5a2 OA2}&y$tnqxFaHOlX(ccK literal 0 HcmV?d00001 From a71563bd7ef40eeeed94739a453599719950b6b7 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Thu, 22 Oct 2020 11:24:54 +0800 Subject: [PATCH 37/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 8603f9deec..39c2a91372 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -4,7 +4,7 @@ One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon).
    - Cream
    + Cream
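The abstract in the patch above describes the prioritized-path mechanism only in prose, so a compact illustration may help while reading the diffs that follow: the supernet keeps a small board of "prioritized paths" (the best-performing subnetworks seen so far, filtered by a complexity budget), and each randomly sampled subnetwork is additionally distilled from the most promising path on that board. The sketch below is a minimal, hypothetical rendering of that idea in Python; the names PrioritizedPathBoard and distillation_loss, the accuracy/FLOPs bookkeeping, and the fixed loss weighting are assumptions made for illustration only and are not the code added later in this series.

    import copy
    import torch
    import torch.nn.functional as F

    class PrioritizedPathBoard:
        """Keep the top-k subnetworks ('prioritized paths') seen so far,
        ranked by validation accuracy under a FLOPs budget.
        Illustrative sketch only, not the repository implementation."""

        def __init__(self, board_size=10, max_flops=600.0):
            self.board_size = board_size
            self.max_flops = max_flops
            self.board = []  # entries: {'arch': ..., 'acc': float, 'flops': float}

        def update(self, arch, acc, flops):
            # Paths over the complexity budget never become teachers.
            if flops > self.max_flops:
                return
            self.board.append({'arch': copy.deepcopy(arch), 'acc': acc, 'flops': flops})
            # Re-rank on the fly; only the best `board_size` paths survive.
            self.board.sort(key=lambda p: p['acc'], reverse=True)
            self.board = self.board[:self.board_size]

        def best_path(self):
            # The most promising prioritized path acts as the teacher and,
            # at the end of search, as the final architecture.
            return self.board[0]['arch'] if self.board else None

    def distillation_loss(student_logits, teacher_logits, labels,
                          alpha=0.5, temperature=1.0):
        """Cross-entropy on the labels plus a soft-label term from the teacher path."""
        hard = F.cross_entropy(student_logits, labels)
        soft = F.kl_div(
            F.log_softmax(student_logits / temperature, dim=1),
            F.softmax(teacher_logits / temperature, dim=1),
            reduction='batchmean',
        ) * (temperature ** 2)
        return (1.0 - alpha) * hard + alpha * soft

    # Toy usage with random tensors standing in for the sampled subnetwork
    # (student) and the prioritized path (teacher).
    student_logits = torch.randn(8, 1000, requires_grad=True)
    teacher_logits = torch.randn(8, 1000)
    labels = torch.randint(0, 1000, (8,))
    loss = distillation_loss(student_logits, teacher_logits, labels)
    loss.backward()

In the "version 1.0" commit below, the corresponding bookkeeping is introduced in examples/nas/cream/lib/models/PrioritizedBoard.py, and the pairing of a sampled subnetwork with a teacher path is handled in examples/nas/cream/lib/models/MetaMatchingNetwork.py.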
    From c00c58eb0965121b4b644189ea973f0da13cce54 Mon Sep 17 00:00:00 2001 From: mapleam Date: Wed, 18 Nov 2020 11:56:59 +0800 Subject: [PATCH 38/62] version 1.0 --- examples/{nas/cream/models => }/__init__.py | 0 examples/nas/__init__.py | 0 examples/nas/cream/distributed_test.sh | 4 - examples/nas/cream/distributed_train.sh | 4 - examples/nas/cream/flops_table.py | 77 --- examples/nas/cream/lib/config.py | 121 +++++ examples/nas/cream/lib/core/retrain.py | 136 +++++ examples/nas/cream/lib/core/test.py | 98 ++++ examples/nas/cream/lib/core/train.py | 230 +++++++++ .../cream/lib/models/MetaMatchingNetwork.py | 167 ++++++ .../nas/cream/lib/models/PrioritizedBoard.py | 137 +++++ .../nas/cream/lib/models/blocks/__init__.py | 2 + .../models/blocks/inverted_residual_block.py | 113 ++++ .../models/blocks/residual_block.py} | 17 +- .../lib/models/builders/build_childnet.py | 181 +++++++ .../lib/models/builders/build_supernet.py | 230 +++++++++ .../cream/lib/models/structures/childnet.py | 145 ++++++ .../cream/lib/models/structures/supernet.py | 206 ++++++++ examples/nas/cream/lib/utils/builder_util.py | 273 ++++++++++ examples/nas/cream/lib/utils/flops_table.py | 83 +++ .../nas/cream/lib/utils/op_by_layer_dict.py | 42 ++ .../lib/utils/search_structure_supernet.py | 47 ++ examples/nas/cream/lib/utils/util.py | 178 +++++++ examples/nas/cream/models/builder.py | 392 -------------- examples/nas/cream/models/hbuilder.py | 417 --------------- examples/nas/cream/models/hypernet.py | 307 ----------- examples/nas/cream/models/model.py | 159 ------ .../cream/{requirements.txt => requirements} | 3 +- examples/nas/cream/run.sh | 6 - examples/nas/cream/supernet.py | 389 -------------- examples/nas/cream/test.py | 482 ------------------ examples/nas/cream/test.sh | 2 - examples/nas/cream/tools/_init_paths.py | 24 + examples/nas/cream/tools/main.py | 61 +++ examples/nas/cream/tools/retrain.py | 317 ++++++++++++ examples/nas/cream/tools/test.py | 157 ++++++ examples/nas/cream/tools/train.py | 250 +++++++++ .../pynni/nni/nas/pytorch/cream/mutator.py | 66 --- 38 files changed, 3215 insertions(+), 2308 deletions(-) rename examples/{nas/cream/models => }/__init__.py (100%) mode change 100755 => 100644 create mode 100644 examples/nas/__init__.py delete mode 100755 examples/nas/cream/distributed_test.sh delete mode 100755 examples/nas/cream/distributed_train.sh delete mode 100644 examples/nas/cream/flops_table.py create mode 100644 examples/nas/cream/lib/config.py create mode 100755 examples/nas/cream/lib/core/retrain.py create mode 100755 examples/nas/cream/lib/core/test.py create mode 100644 examples/nas/cream/lib/core/train.py create mode 100644 examples/nas/cream/lib/models/MetaMatchingNetwork.py create mode 100644 examples/nas/cream/lib/models/PrioritizedBoard.py create mode 100644 examples/nas/cream/lib/models/blocks/__init__.py create mode 100644 examples/nas/cream/lib/models/blocks/inverted_residual_block.py rename examples/nas/cream/{models/resunit.py => lib/models/blocks/residual_block.py} (87%) mode change 100755 => 100644 create mode 100755 examples/nas/cream/lib/models/builders/build_childnet.py create mode 100644 examples/nas/cream/lib/models/builders/build_supernet.py create mode 100755 examples/nas/cream/lib/models/structures/childnet.py create mode 100644 examples/nas/cream/lib/models/structures/supernet.py create mode 100644 examples/nas/cream/lib/utils/builder_util.py create mode 100644 examples/nas/cream/lib/utils/flops_table.py create mode 100644 examples/nas/cream/lib/utils/op_by_layer_dict.py 
create mode 100644 examples/nas/cream/lib/utils/search_structure_supernet.py create mode 100644 examples/nas/cream/lib/utils/util.py delete mode 100755 examples/nas/cream/models/builder.py delete mode 100755 examples/nas/cream/models/hbuilder.py delete mode 100755 examples/nas/cream/models/hypernet.py delete mode 100755 examples/nas/cream/models/model.py rename examples/nas/cream/{requirements.txt => requirements} (79%) mode change 100755 => 100644 delete mode 100755 examples/nas/cream/run.sh delete mode 100644 examples/nas/cream/supernet.py delete mode 100755 examples/nas/cream/test.py delete mode 100755 examples/nas/cream/test.sh create mode 100755 examples/nas/cream/tools/_init_paths.py create mode 100644 examples/nas/cream/tools/main.py create mode 100755 examples/nas/cream/tools/retrain.py create mode 100755 examples/nas/cream/tools/test.py create mode 100644 examples/nas/cream/tools/train.py delete mode 100755 src/sdk/pynni/nni/nas/pytorch/cream/mutator.py diff --git a/examples/nas/cream/models/__init__.py b/examples/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from examples/nas/cream/models/__init__.py rename to examples/__init__.py diff --git a/examples/nas/__init__.py b/examples/nas/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/nas/cream/distributed_test.sh b/examples/nas/cream/distributed_test.sh deleted file mode 100755 index dbd5d83459..0000000000 --- a/examples/nas/cream/distributed_test.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -NUM_PROC=$1 -shift -python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/test.py "$@" diff --git a/examples/nas/cream/distributed_train.sh b/examples/nas/cream/distributed_train.sh deleted file mode 100755 index cdd24647b5..0000000000 --- a/examples/nas/cream/distributed_train.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -NUM_PROC=$1 -shift -python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC ./examples/nas/cream/supernet.py "$@" diff --git a/examples/nas/cream/flops_table.py b/examples/nas/cream/flops_table.py deleted file mode 100644 index 0cc597c6f3..0000000000 --- a/examples/nas/cream/flops_table.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -from ptflops import get_model_complexity_info - -class LatencyEst(object): - def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'): - self.block_num = len(model.blocks) - self.choice_num = len(model.blocks[0]) - self.latency_dict = {} - self.flops_dict = {} - self.params_dict = {} - - if device == 'cpu': - model = model.cpu() - else: - model = model.cuda() - - self.params_fixed = 0 - self.flops_fixed = 0 - - input = torch.randn(input_shape) - - flops, params = get_model_complexity_info(model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False) - self.params_fixed += params / 1e6 - self.flops_fixed += flops / 1e6 - - input = model.conv_stem(input) - - # for block_id, block in enumerate(model.blocks): - # self.flops_dict[block_id] = {} - # self.params_dict[block_id] = {} - for module_id, module in enumerate(model.blocks): - self.flops_dict[module_id] = {} - self.params_dict[module_id] = {} - for choice_id, choice in enumerate(module): - flops, params = get_model_complexity_info(choice, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) - self.flops_dict[module_id][choice_id] = flops / 1e6 # M - self.params_dict[module_id][choice_id] = params /1e6 # M - - input = choice(input) - - # conv_last - flops, params = 
get_model_complexity_info(model.global_pool, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) - self.params_fixed += params / 1e6 - self.flops_fixed += flops / 1e6 - - input = model.global_pool(input) - - # globalpool - flops, params = get_model_complexity_info(model.conv_head, tuple(input.shape[1:]), as_strings=False, print_per_layer_stat=False) - self.params_fixed += params / 1e6 - self.flops_fixed += flops / 1e6 - - # return params (M) - def get_params(self, arch): - params = 0 - for block_id, block in enumerate(arch.keys()): - if block == 'LayerChoice1' or block == 'LayerChoice23': - continue - for idx, choice in enumerate(arch[block]): - params += self.params_dict[block_id][idx] * (choice is True) - return params + self.params_fixed - - # return flops (M) - def get_flops(self, arch): - flops = 0 - for block_id, block in enumerate(arch.keys()): - if block == 'LayerChoice1' or block_id == 'LayerChoice23': - continue - for idx, choice in enumerate(arch[block]): - flops += self.flops_dict[block_id][idx] * (1 if choice else 0) - return flops + self.flops_fixed - -if __name__ == '__main__': - from models.hypernet import _gen_supernet - model = _gen_supernet() - est = LatencyEst(model) - print(est.get_flops([[0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0]])) diff --git a/examples/nas/cream/lib/config.py b/examples/nas/cream/lib/config.py new file mode 100644 index 0000000000..8bd67b6ef6 --- /dev/null +++ b/examples/nas/cream/lib/config.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from yacs.config import CfgNode as CN + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + +__C = CN() + +cfg = __C + +__C.AUTO_RESUME = True +__C.DATA_DIR = './data/imagenet' +__C.MODEL = 'cream' +__C.RESUME_PATH = './experiments/ckps/resume.pth.tar' +__C.SAVE_PATH = './experiments/ckps/' +__C.SEED = 42 +__C.LOG_INTERVAL = 50 +__C.RECOVERY_INTERVAL = 0 +__C.WORKERS = 4 +__C.NUM_GPU = 1 +__C.SAVE_IMAGES = False +__C.AMP = False +__C.OUTPUT = 'output/path/' +__C.EVAL_METRICS = 'prec1' +__C.TTA = 0 # Test or inference time augmentation +__C.LOCAL_RANK = 0 +__C.VERBOSE = False + +# dataset configs +__C.DATASET = CN() +__C.DATASET.NUM_CLASSES = 1000 +__C.DATASET.IMAGE_SIZE = 224 # image patch size +__C.DATASET.INTERPOLATION = 'bilinear' # Image resize interpolation type +__C.DATASET.BATCH_SIZE = 32 # batch size +__C.DATASET.NO_PREFECHTER = False +__C.DATASET.PIN_MEM = True +__C.DATASET.VAL_BATCH_MUL = 4 + + +# model configs +__C.NET = CN() +__C.NET.SELECTION = 14 +__C.NET.GP = 'avg' # type of global pool ["avg", "max", "avgmax", "avgmaxc"] +__C.NET.DROPOUT_RATE = 0.0 # dropout rate + +# model ema parameters +__C.NET.EMA = CN() +__C.NET.EMA.USE = True +__C.NET.EMA.FORCE_CPU = False # force model ema to be tracked on CPU +__C.NET.EMA.DECAY = 0.9998 + +# optimizer configs +__C.OPT = 'sgd' +__C.OPT_EPS = 1e-2 +__C.MOMENTUM = 0.9 +__C.WEIGHT_DECAY = 1e-4 +__C.OPTIMIZER = CN() +__C.OPTIMIZER.NAME = 'sgd' +__C.OPTIMIZER.MOMENTUM = 0.9 +__C.OPTIMIZER.WEIGHT_DECAY = 1e-3 + +# scheduler configs +__C.SCHED = 'sgd' +__C.LR_NOISE = None +__C.LR_NOISE_PCT = 0.67 +__C.LR_NOISE_STD = 1.0 +__C.WARMUP_LR = 1e-4 
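For orientation, a minimal sketch of how a yacs CfgNode such as the lib/config.py defaults above is typically consumed. The YAML path and the override list are illustrative assumptions; the actual entry point lives in tools/main.py, which is not reproduced here.

    # illustrative sketch; the file name and overrides are assumptions, not taken from this patch
    from lib.config import cfg

    def setup_cfg(yaml_path, opts=None):
        local_cfg = cfg.clone()               # keep the module-level defaults untouched
        local_cfg.merge_from_file(yaml_path)  # e.g. an experiment YAML overriding DATASET.BATCH_SIZE
        if opts:
            local_cfg.merge_from_list(opts)   # command-line overrides, e.g. ['LR', 0.05]
        local_cfg.freeze()
        return local_cfg
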
+__C.MIN_LR = 1e-5 +__C.EPOCHS = 200 +__C.START_EPOCH = None +__C.DECAY_EPOCHS = 30.0 +__C.WARMUP_EPOCHS = 3 +__C.COOLDOWN_EPOCHS = 10 +__C.PATIENCE_EPOCHS = 10 +__C.DECAY_RATE = 0.1 +__C.LR = 1e-2 +__C.META_LR = 1e-4 + +# data augmentation parameters +__C.AUGMENTATION = CN() +__C.AUGMENTATION.AA = 'rand-m9-mstd0.5' +__C.AUGMENTATION.COLOR_JITTER = 0.4 +__C.AUGMENTATION.RE_PROB = 0.2 # random erase prob +__C.AUGMENTATION.RE_MODE = 'pixel' # random erase mode +__C.AUGMENTATION.MIXUP = 0.0 # mixup alpha +__C.AUGMENTATION.MIXUP_OFF_EPOCH = 0 # turn off mixup after this epoch +__C.AUGMENTATION.SMOOTHING = 0.1 # label smoothing parameters + +# batch norm parameters (only works with gen_efficientnet based models +# currently) +__C.BATCHNORM = CN() +__C.BATCHNORM.SYNC_BN = True +__C.BATCHNORM.BN_TF = False +__C.BATCHNORM.BN_MOMENTUM = 0.1 # batchnorm momentum override +__C.BATCHNORM.BN_EPS = 1e-5 # batchnorm eps override + +# supernet training hyperparameters +__C.SUPERNET = CN() +__C.SUPERNET.UPDATE_ITER = 1300 +__C.SUPERNET.SLICE = 4 +__C.SUPERNET.POOL_SIZE = 10 +__C.SUPERNET.RESUNIT = False +__C.SUPERNET.DIL_CONV = False +__C.SUPERNET.UPDATE_2ND = True +__C.SUPERNET.FLOPS_MAXIMUM = 600 +__C.SUPERNET.FLOPS_MINIMUM = 0 +__C.SUPERNET.PICK_METHOD = 'meta' # pick teacher method +__C.SUPERNET.META_STA_EPOCH = 20 # start using meta picking method +__C.SUPERNET.HOW_TO_PROB = 'pre_prob' # sample method +__C.SUPERNET.PRE_PROB = (0.05, 0.2, 0.05, 0.5, 0.05, + 0.15) # sample prob in 'pre_prob' diff --git a/examples/nas/cream/lib/core/retrain.py b/examples/nas/cream/lib/core/retrain.py new file mode 100755 index 0000000000..ca234848ab --- /dev/null +++ b/examples/nas/cream/lib/core/retrain.py @@ -0,0 +1,136 @@ +import os +import time +import torch +import torchvision + +from collections import OrderedDict + +from lib.utils.util import AverageMeter, accuracy, reduce_tensor + + +def train_epoch( + epoch, model, loader, optimizer, loss_fn, cfg, + lr_scheduler=None, saver=None, output_dir='', use_amp=False, + model_ema=None, logger=None, writer=None, local_rank=0): + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + num_updates = epoch * len(loader) + optimizer.zero_grad() + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + + input = input.cuda() + target = target.cuda() + output = model(input) + + loss = loss_fn(output, target) + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + if model_ema is not None: + model_ema.update(model) + num_updates += 1 + + batch_time_m.update(time.time() - end) + if last_batch or batch_idx % cfg.LOG_INTERVAL == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if local_rank == 0: + logger.info( + 'Train: {} [{:>4d}/{}] ' + 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: 
{top5.val:>7.4f} ({top5.avg:>7.4f}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e}' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, + len(loader), + loss=losses_m, + top1=prec1_m, + top5=prec5_m, + batch_time=batch_time_m, + rate=input.size(0) * + cfg.NUM_GPU / + batch_time_m.val, + rate_avg=input.size(0) * + cfg.NUM_GPU / + batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + + writer.add_scalar( + 'Loss/train', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Accuracy/train', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Learning_Rate', + optimizer.param_groups[0]['lr'], + epoch * len(loader) + batch_idx) + + if cfg.SAVE_IMAGES and output_dir: + torchvision.utils.save_image( + input, os.path.join( + output_dir, 'train-batch-%d.jpg' % + batch_idx), padding=0, normalize=True) + + if saver is not None and cfg.RECOVERY_INTERVAL and ( + last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): + saver.save_recovery( + model, + optimizer, + cfg, + epoch, + model_ema=model_ema, + use_amp=use_amp, + batch_idx=batch_idx) + + if lr_scheduler is not None: + lr_scheduler.step_update( + num_updates=num_updates, + metric=losses_m.avg) + + end = time.time() + # end for + + if hasattr(optimizer, 'sync_lookahead'): + optimizer.sync_lookahead() + + return OrderedDict([('loss', losses_m.avg)]) diff --git a/examples/nas/cream/lib/core/test.py b/examples/nas/cream/lib/core/test.py new file mode 100755 index 0000000000..69d4dabbf0 --- /dev/null +++ b/examples/nas/cream/lib/core/test.py @@ -0,0 +1,98 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import time +import torch + +from collections import OrderedDict +from lib.utils.util import AverageMeter, accuracy, reduce_tensor + + +def validate( + epoch, + model, + loader, + loss_fn, + cfg, + log_suffix='', + logger=None, + writer=None, + local_rank=0): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + + output = model(input) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = cfg.TTA + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if local_rank == 0 and ( + last_batch or batch_idx % + cfg.LOG_INTERVAL == 0): + log_name = 'Test' + log_suffix + logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 
'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, + batch_time=batch_time_m, loss=losses_m, + top1=prec1_m, top5=prec5_m)) + + writer.add_scalar( + 'Loss' + log_suffix + '/vaild', + prec1_m.avg, + epoch * len(loader) + batch_idx) + writer.add_scalar( + 'Accuracy' + + log_suffix + + '/vaild', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + + metrics = OrderedDict( + [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) + + return metrics diff --git a/examples/nas/cream/lib/core/train.py b/examples/nas/cream/lib/core/train.py new file mode 100644 index 0000000000..b18a7fe8db --- /dev/null +++ b/examples/nas/cream/lib/core/train.py @@ -0,0 +1,230 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import time +import torchvision +import torch.nn.functional as F + +from lib.utils.util import * + + +def train_epoch( + epoch, + model, + loader, + optimizer, + loss_fn, + prioritized_board, + MetaMN, + cfg, + est=None, + logger=None, + lr_scheduler=None, + saver=None, + output_dir='', + model_ema=None, + local_rank=0): + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + kd_losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + + # get random architectures + prob = prioritized_board.get_prob() + random_cand = prioritized_board.get_cand_with_prob(prob) + random_cand.insert(0, [0]) + random_cand.append([0]) + + # evaluate FLOPs of candidates + # cand_flops = est.get_flops(random_cand) + + # update meta matching networks + # MetaMN.run_update(input, target, random_cand, model, optimizer, + # prioritized_board, loss_fn, epoch, batch_idx) + + # get_best_teacher + # meta_value, teacher_cand = prioritized_board.select_teacher(model, random_cand) + + if prioritized_board.board_size() == 0 or epoch <= cfg.SUPERNET.META_STA_EPOCH: + output = model(input, random_cand) + loss = loss_fn(output, target) + kd_loss, teacher_output, teacher_cand = None, None, None + else: + output = model(input, random_cand) + valid_loss = loss_fn(output, target) + + # get soft label from teacher cand + with torch.no_grad(): + teacher_output = model(input, teacher_cand).detach() + soft_label = F.softmax(teacher_output, dim=1) + kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) + + loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2 + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + if cfg.NUM_GPU == 1: + reduced_loss = loss.data + else: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + + # prioritized_board.update_prioritized_board(input, teacher_output, output, epoch, prec1, cand_flops, teacher_cand) + + torch.cuda.synchronize() + + if kd_loss is not None: + kd_losses_m.update(kd_loss.item(), input.size(0)) + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + batch_time_m.update(time.time() - end) + + if lr_scheduler is not None: + lr_scheduler.step() + + if last_batch or batch_idx % 
cfg.LOG_INTERVAL == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if local_rank == 0: + logger.info( + 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' + 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' + 'KD-Loss: {kd_loss.val:>9.6f} ({kd_loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e} ' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, len(loader), + 100. * batch_idx / last_idx, + loss=losses_m, + kd_loss=kd_losses_m, + top1=prec1_m, + top5=prec5_m, + batch_time=batch_time_m, + rate=input.size(0) * cfg.NUM_GPU / batch_time_m.val, + rate_avg=input.size(0) * cfg.NUM_GPU / batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + + if cfg.SAVE_IMAGES and output_dir: + torchvision.utils.save_image( + input, os.path.join( + output_dir, 'train-batch-%d.jpg' % + batch_idx), padding=0, normalize=True) + + if saver is not None and cfg.RECOVERY_INTERVAL and ( + last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): + saver.save_recovery(model, optimizer, cfg, epoch, + model_ema=model_ema, batch_idx=batch_idx) + + end = time.time() + + if local_rank == 0: + for idx, i in enumerate(prioritized_board.prioritized_board): + logger.info("No.{} {}".format(idx, i[:4])) + + return OrderedDict([('loss', losses_m.avg)]) + + +def validate( + model, + loader, + loss_fn, + prioritized_board, + cfg, + log_suffix='', + local_rank=0, + logger=None): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + + # get random child architecture + random_cand = prioritized_board.get_cand_with_prob(None) + random_cand.insert(0, [0]) + random_cand.append([0]) + + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + input = input.cuda() + target = target.cuda() + + output = model(input, random_cand) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = cfg.TTA + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if local_rank == 0 and ( + last_batch or batch_idx % + cfg.LOG_INTERVAL == 0): + log_name = 'Test' + log_suffix + logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, + batch_time=batch_time_m, loss=losses_m, + top1=prec1_m, top5=prec5_m)) + + metrics = OrderedDict( + [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) + + return metrics diff --git 
a/examples/nas/cream/lib/models/MetaMatchingNetwork.py b/examples/nas/cream/lib/models/MetaMatchingNetwork.py new file mode 100644 index 0000000000..fc6dcb60e5 --- /dev/null +++ b/examples/nas/cream/lib/models/MetaMatchingNetwork.py @@ -0,0 +1,167 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import torch +import torch.nn.functional as F + +from copy import deepcopy + +from lib.utils.util import cross_entropy_loss_with_soft_target + + +class MetaMatchingNetwork(): + def __init__(self, cfg): + self.cfg = cfg + + # only update student network weights + def update_student_weights_only( + self, + random_cand, + grad_1, + optimizer, + model): + for weight, grad_item in zip( + model.module.rand_parameters(random_cand), grad_1): + weight.grad = grad_item + torch.nn.utils.clip_grad_norm_( + model.module.rand_parameters(random_cand), 1) + optimizer.step() + for weight, grad_item in zip( + model.module.rand_parameters(random_cand), grad_1): + del weight.grad + + # only update meta networks weights + def update_meta_weights_only( + self, + random_cand, + teacher_cand, + model, + optimizer, + grad_teacher): + for weight, grad_item in zip(model.module.rand_parameters( + teacher_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), grad_teacher): + weight.grad = grad_item + + # clip gradients + torch.nn.utils.clip_grad_norm_( + model.module.rand_parameters( + random_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), 1) + + optimizer.step() + for weight, grad_item in zip(model.module.rand_parameters( + teacher_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), grad_teacher): + del weight.grad + + # simulate sgd updating + def simulate_sgd_update(self, w, g, optimizer): + return g * optimizer.param_groups[-1]['lr'] + w + + # split training images into several slices + def get_minibatch_input(self, input): + slice = self.cfg.SUPERNET.SLICE + x = deepcopy(input[:slice].clone().detach()) + return x + + def calculate_1st_gradient(self, kd_loss, model, random_cand, optimizer): + optimizer.zero_grad() + grad = torch.autograd.grad( + kd_loss, + model.module.rand_parameters(random_cand), + create_graph=True) + return grad + + def calculate_2nd_gradient( + self, + validation_loss, + model, + optimizer, + random_cand, + teacher_cand, + students_weight): + optimizer.zero_grad() + grad_student_val = torch.autograd.grad( + validation_loss, + model.module.rand_parameters(random_cand), + retain_graph=True) + + grad_teacher = torch.autograd.grad( + students_weight[0], + model.module.rand_parameters( + teacher_cand, + self.cfg.SUPERNET.PICK_METHOD == 'meta'), + grad_outputs=grad_student_val) + return grad_teacher + + # forward training data + def forward_training( + self, + x, + model, + random_cand, + teacher_cand, + meta_value): + output = model(x, random_cand) + with torch.no_grad(): + teacher_output = model(x, teacher_cand) + soft_label = F.softmax(teacher_output, dim=1) + kd_loss = meta_value * \ + cross_entropy_loss_with_soft_target(output, soft_label) + return kd_loss + + # forward validation data + def forward_validation(self, input, target, random_cand, model, loss_fn): + slice = self.cfg.SUPERNET.SLICE + x = input[slice:slice * 2].clone() + output_2 = model(x, random_cand) + validation_loss = loss_fn(output_2, target[slice:slice * 2]) + return validation_loss + + def isUpdate(self, current_epoch, batch_idx, prioritized_board): + isUpdate = True + isUpdate &= (current_epoch > 
self.cfg.SUPERNET.META_STA_EPOCH) + isUpdate &= (batch_idx > 0) + isUpdate &= (batch_idx % self.cfg.SUPERNET.UPDATE_ITER == 0) + isUpdate &= (prioritized_board.board_size() > 0) + return isUpdate + + # update meta matching networks + def run_update(self, input, target, random_cand, model, optimizer, + prioritized_board, loss_fn, current_epoch, batch_idx): + if self.isUpdate(current_epoch, batch_idx, prioritized_board): + x = self.get_minibatch_input(input) + + meta_value, teacher_cand = prioritized_board.select_teacher( + model, random_cand) + + kd_loss = self.forward_training( + x, model, random_cand, teacher_cand, meta_value) + + # calculate 1st gradient + grad_1st = self.calculate_1st_gradient( + kd_loss, model, random_cand, optimizer) + + # simulate updated student weights + students_weight = [ + self.simulate_sgd_update( + p, grad_item, optimizer) for p, grad_item in zip( + model.module.rand_parameters(random_cand), grad_1st)] + + # update student weights + self.update_student_weights_only( + random_cand, grad_1st, optimizer, model) + + validation_loss = self.forward_validation( + input, target, random_cand, model, loss_fn) + + # calculate 2nd gradient + grad_teacher = self.calculate_2nd_gradient( + validation_loss, model, optimizer, random_cand, teacher_cand, students_weight) + + # update meta matching networks + self.update_meta_weights_only( + random_cand, teacher_cand, model, optimizer, grad_teacher) + + # delete internal variants + del grad_teacher, grad_1st, x, validation_loss, kd_loss, students_weight diff --git a/examples/nas/cream/lib/models/PrioritizedBoard.py b/examples/nas/cream/lib/models/PrioritizedBoard.py new file mode 100644 index 0000000000..e38bedc903 --- /dev/null +++ b/examples/nas/cream/lib/models/PrioritizedBoard.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
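For context, a condensed sketch of how the meta matching network and the prioritized board plug into a supernet training step. The call order mirrors the hooks that appear (partly commented out) in lib/core/train.py; names such as model, optimizer, loss_fn, est, epoch and batch_idx are assumed to be in scope from the surrounding train_epoch.

    # illustrative sketch of one per-batch step, not the verbatim patch code
    prob = prioritized_board.get_prob()                       # sampling distribution over ops
    random_cand = prioritized_board.get_cand_with_prob(prob)  # per-layer op choices
    random_cand.insert(0, [0])                                # fixed stem block
    random_cand.append([0])                                   # fixed tail block

    cand_flops = est.get_flops(random_cand)                   # FLOPs lookup for the candidate

    # second-order update of the meta matching network (only fires after META_STA_EPOCH)
    MetaMN.run_update(input, target, random_cand, model, optimizer,
                      prioritized_board, loss_fn, epoch, batch_idx)

    # pick the best teacher on the board and distil from it;
    # train.py then combines: loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2
    meta_value, teacher_cand = prioritized_board.select_teacher(model, random_cand)
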
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import numpy as np +import torch.nn.functional as F + +from copy import deepcopy + + +class PrioritizedBoard(): + def __init__(self, cfg, CHOICE_NUM=6, sta_num=(4, 4, 4, 4, 4), acc_gap=5): + self.cfg = cfg + self.prioritized_board = [] + self.choice_num = CHOICE_NUM + self.sta_num = sta_num + self.acc_gap = acc_gap + + # select teacher from prioritized board + + def select_teacher(self, model, random_cand): + if self.cfg.SUPERNET.PICK_METHOD == 'top1': + meta_value, teacher_cand = 0.5, sorted( + self.prioritized_board, reverse=True)[0][3] + elif self.cfg.SUPERNET.PICK_METHOD == 'meta': + meta_value, cand_idx, teacher_cand = -1000000000, -1, None + for now_idx, item in enumerate(self.prioritized_board): + inputx = item[4] + output = F.softmax(model(inputx, random_cand), dim=1) + weight = model.module.forward_meta(output - item[5]) + if weight > meta_value: + meta_value = weight + cand_idx = now_idx + teacher_cand = self.prioritized_board[cand_idx][3] + assert teacher_cand is not None + meta_value = F.sigmoid(-weight) + else: + raise ValueError('Method Not supported') + + return meta_value, teacher_cand + + def board_size(self): + return len(self.prioritized_board) + + # get prob from config file + + def get_prob(self): + if self.cfg.SUPERNET.HOW_TO_PROB == 'even' or ( + self.cfg.SUPERNET.HOW_TO_PROB == 'teacher' and len( + self.prioritized_board) == 0): + return None + elif self.cfg.SUPERNET.HOW_TO_PROB == 'pre_prob': + return self.cfg.SUPERNET.PRE_PROB + elif self.cfg.SUPERNET.HOW_TO_PROB == 'teacher': + op_dict = {} + for i in range(self.choice_num): + op_dict[i] = 0 + for item in self.prioritized_board: + cand = item[3] + for block in cand: + for op in block: + op_dict[op] += 1 + sum_op = 0 + for i in range(self.choice_num): + sum_op = sum_op + op_dict[i] + prob = [] + for i in range(self.choice_num): + prob.append(float(op_dict[i]) / sum_op) + del op_dict, sum_op + return prob + + # sample random architecture + + def get_cand_with_prob(self, prob=None): + if prob is None: + get_random_cand = [ + np.random.choice( + self.choice_num, + item).tolist() for item in self.sta_num] + else: + get_random_cand = [ + np.random.choice( + self.choice_num, + item, + prob).tolist() for item in self.sta_num] + + return get_random_cand + + def isUpdate(self, current_epoch, prec1, flops): + if current_epoch <= self.cfg.SUPERNET.META_STA_EPOCH: + return False + + if len(self.prioritized_board) < self.cfg.SUPERNET.POOL_SIZE: + return True + + if prec1 > self.prioritized_board[-1][1] + self.acc_gap: + return True + + if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]: + return True + + return False + + def update_prioritized_board( + self, + inputs, + teacher_output, + outputs, + current_epoch, + prec1, + flops, + cand): + if self.isUpdate(current_epoch, prec1, flops): + val_prec1 = prec1 + training_data = deepcopy(inputs[:self.cfg.SUPERNET.SLICE].detach()) + if len(self.prioritized_board) == 0: + features = deepcopy(outputs[:self.cfg.SUPERNET.SLICE].detach()) + else: + features = deepcopy( + teacher_output[:self.cfg.SUPERNET.SLICE].detach()) + self.prioritized_board.append( + (val_prec1, + prec1, + flops, + cand, + training_data, + F.softmax( + features, + dim=1))) + self.prioritized_board = sorted( + self.prioritized_board, reverse=True) + + if len(self.prioritized_board) > self.cfg.SUPERNET.POOL_SIZE: + self.prioritized_board = sorted( + self.prioritized_board, 
reverse=True) + del self.prioritized_board[-1] diff --git a/examples/nas/cream/lib/models/blocks/__init__.py b/examples/nas/cream/lib/models/blocks/__init__.py new file mode 100644 index 0000000000..83a19f2b91 --- /dev/null +++ b/examples/nas/cream/lib/models/blocks/__init__.py @@ -0,0 +1,2 @@ +from lib.models.blocks.residual_block import get_Bottleneck, get_BasicBlock +from lib.models.blocks.inverted_residual_block import InvertedResidual \ No newline at end of file diff --git a/examples/nas/cream/lib/models/blocks/inverted_residual_block.py b/examples/nas/cream/lib/models/blocks/inverted_residual_block.py new file mode 100644 index 0000000000..2f501b561b --- /dev/null +++ b/examples/nas/cream/lib/models/blocks/inverted_residual_block.py @@ -0,0 +1,113 @@ +# This file is downloaded from +# https://github.com/rwightman/pytorch-image-models + +import torch.nn as nn + +from timm.models.layers import create_conv2d +from timm.models.efficientnet_blocks import make_divisible, resolve_se_args, \ + SqueezeExcite, drop_path + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__( + self, + in_chs, + out_chs, + dw_kernel_size=3, + stride=1, + dilation=1, + pad_type='', + act_layer=nn.ReLU, + noskip=False, + exp_ratio=1.0, + exp_kernel_size=1, + pw_kernel_size=1, + se_ratio=0., + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + conv_kwargs=None, + drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d( + in_chs, + mid_chs, + exp_kernel_size, + padding=pad_type, + **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d( + mid_chs, + out_chs, + pw_kernel_size, + padding=pad_type, + **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict( + module='conv_pwl', + hook_type='forward_pre', + num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict( + module='', + hook_type='', + num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += residual + + return x diff --git 
a/examples/nas/cream/models/resunit.py b/examples/nas/cream/lib/models/blocks/residual_block.py old mode 100755 new mode 100644 similarity index 87% rename from examples/nas/cream/models/resunit.py rename to examples/nas/cream/lib/models/blocks/residual_block.py index ede5940224..75892eee79 --- a/examples/nas/cream/models/resunit.py +++ b/examples/nas/cream/lib/models/blocks/residual_block.py @@ -1,12 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + import torch import torch.nn as nn import torch.nn.functional as F + def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True) + class BasicBlock(nn.Module): expansion = 1 @@ -49,7 +56,11 @@ def __init__(self, inplanes, planes, stride=1, expansion=4): self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True) self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * expansion, kernel_size=1, bias=True) + self.conv3 = nn.Conv2d( + planes, + planes * expansion, + kernel_size=1, + bias=True) self.bn3 = nn.BatchNorm2d(planes * expansion) self.relu = nn.ReLU(inplace=True) self.stride = stride @@ -85,8 +96,10 @@ def forward(self, x): return out + def get_Bottleneck(in_c, out_c, stride): return Bottleneck(in_c, out_c, stride=stride) + def get_BasicBlock(in_c, out_c, stride): - return BasicBlock(in_c, out_c, stride=stride) \ No newline at end of file + return BasicBlock(in_c, out_c, stride=stride) diff --git a/examples/nas/cream/lib/models/builders/build_childnet.py b/examples/nas/cream/lib/models/builders/build_childnet.py new file mode 100755 index 0000000000..8ddfb40024 --- /dev/null +++ b/examples/nas/cream/lib/models/builders/build_childnet.py @@ -0,0 +1,181 @@ +from lib.utils.util import * + +from timm.models.efficientnet_blocks import * + + +class ChildNetBuilder: + def __init__( + self, + channel_multiplier=1.0, + channel_divisor=8, + channel_min=None, + output_stride=32, + pad_type='', + act_layer=None, + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + drop_path_rate=0., + feature_location='', + verbose=False, + logger=None): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + self.in_chs = None + self.features = OrderedDict() + self.logger = logger + + def _round_channels(self, chs): + return round_channels( + chs, + self.channel_multiplier, + self.channel_divisor, + self.channel_min) + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if 
ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' InvertedResidual {}, Args: {}'.format( + block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' DepthwiseSeparable {}, Args: {}'.format( + block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + self.logger.info( + ' ConvBnAct {}, Args: {}'.format( + block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + self.logger.info( + 'Building model trunk with %d stages...' % + len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some + # conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + self.logger.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + extract_features = '' # No features extracted + if self.verbose: + self.logger.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + do_extract = False + if self.feature_location == 'pre_pwl': + if last_block: + next_stage_idx = stage_idx + 1 + if next_stage_idx >= len(model_block_args): + do_extract = True + else: + do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 + elif self.feature_location == 'post_exp': + if block_args['stride'] > 1 or (last_stack and last_block): + do_extract = True + if do_extract: + extract_features = self.feature_location + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + self.logger.info( + ' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block( + block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature + # extraction + 
if extract_features: + feature_module = block.feature_module(extract_features) + if feature_module: + feature_module = 'blocks.{}.{}.'.format( + stage_idx, block_idx) + feature_module + feature_channels = block.feature_channels(extract_features) + self.features[feature_idx] = dict( + name=feature_module, + num_chs=feature_channels + ) + feature_idx += 1 + + # incr global block idx (across all stacks) + total_block_idx += 1 + stages.append(nn.Sequential(*blocks)) + return stages diff --git a/examples/nas/cream/lib/models/builders/build_supernet.py b/examples/nas/cream/lib/models/builders/build_supernet.py new file mode 100644 index 0000000000..0164174af0 --- /dev/null +++ b/examples/nas/cream/lib/models/builders/build_supernet.py @@ -0,0 +1,230 @@ +from copy import deepcopy + +from lib.utils.builder_util import modify_block_args +from lib.models.blocks import get_Bottleneck, InvertedResidual + +from timm.models.efficientnet_blocks import * + + +class SuperNetBuilder: + """ Build Trunk Blocks + """ + + def __init__( + self, + choices, + channel_multiplier=1.0, + channel_divisor=8, + channel_min=None, + output_stride=32, + pad_type='', + act_layer=None, + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + drop_path_rate=0., + feature_location='', + verbose=False, + resunit=False, + dil_conv=False, + logger=None): + + # dict + # choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + self.choices = [[x, y] for x in choices['kernel_size'] + for y in choices['exp_ratio']] + self.choices_num = len(self.choices) - 1 + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + self.resunit = resunit + self.dil_conv = dil_conv + self.logger = logger + + # state updated during build, consumed by model + self.in_chs = None + + def _round_channels(self, chs): + return round_channels( + chs, + self.channel_multiplier, + self.channel_divisor, + self.channel_min) + + def _make_block( + self, + ba, + choice_idx, + block_idx, + block_count, + resunit=False, + dil_conv=False): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input + # filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' InvertedResidual {}, Args: {}'.format( + block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + self.logger.info( + ' DepthwiseSeparable {}, Args: {}'.format( + block_idx, str(ba))) + block = 
DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + self.logger.info( + ' ConvBnAct {}, Args: {}'.format( + block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + if choice_idx == self.choice_num - 1: + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + self.logger.info( + 'Building model trunk with %d stages...' % + len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = nn.ModuleList() + # outer list of block_args defines the stacks ('stages' by some + # conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + self.logger.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + blocks = nn.ModuleList() + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + if self.verbose: + self.logger.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + self.logger.info( + ' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + if stage_idx == 0 or stage_idx == 6: + self.choice_num = 1 + else: + self.choice_num = len(self.choices) + + if self.dil_conv: + self.choice_num += 2 + + choice_blocks = nn.ModuleList() + block_args_copy = deepcopy(block_args) + if self.choice_num == 1: + # create the block + block = self._make_block( + block_args, 0, total_block_idx, total_block_count) + choice_blocks.append(block) + else: + for choice_idx, choice in enumerate(self.choices): + # create the block + block_args = deepcopy(block_args_copy) + block_args = modify_block_args( + block_args, choice[0], choice[1]) + block = self._make_block( + block_args, choice_idx, total_block_idx, total_block_count) + choice_blocks.append(block) + if self.dil_conv: + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 3, 0) + block = self._make_block( + block_args, + self.choice_num - 2, + total_block_idx, + total_block_count, + resunit=self.resunit, + dil_conv=self.dil_conv) + choice_blocks.append(block) + + block_args = deepcopy(block_args_copy) + block_args = modify_block_args(block_args, 5, 0) + block = self._make_block( + block_args, + self.choice_num - 1, + 
total_block_idx, + total_block_count, + resunit=self.resunit, + dil_conv=self.dil_conv) + choice_blocks.append(block) + + if self.resunit: + block = get_Bottleneck(block.conv_pw.in_channels, + block.conv_pwl.out_channels, + block.conv_dw.stride[0]) + choice_blocks.append(block) + + blocks.append(choice_blocks) + # incr global block idx (across all stacks) + total_block_idx += 1 + + stages.append(blocks) + return stages diff --git a/examples/nas/cream/lib/models/structures/childnet.py b/examples/nas/cream/lib/models/structures/childnet.py new file mode 100755 index 0000000000..668b92e157 --- /dev/null +++ b/examples/nas/cream/lib/models/structures/childnet.py @@ -0,0 +1,145 @@ +from lib.utils.builder_util import * +from lib.models.builders.build_childnet import * + +from timm.models.layers import SelectAdaptivePool2d +from timm.models.layers.activations import hard_sigmoid + + +class ChildNet(nn.Module): + + def __init__( + self, + block_args, + num_classes=1000, + in_chans=3, + stem_size=16, + num_features=1280, + head_bias=True, + channel_multiplier=1.0, + pad_type='', + act_layer=nn.ReLU, + drop_rate=0., + drop_path_rate=0., + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + norm_kwargs=None, + global_pool='avg', + logger=None, + verbose=False): + super(ChildNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.logger = logger + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d( + self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = ChildNetBuilder( + channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=verbose) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + # self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d( + self._in_chs, + self.num_features, + 1, + padding=pad_type, + bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear( + self.num_features * + self.global_pool.feat_mult(), + self.num_classes) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), + num_classes) if self.num_classes else None + + def forward_features(self, x): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def gen_childnet(arch_list, arch_def, **kwargs): + # arch_list = [[0], [], [], [], [], [0]] + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + choices_list = [[x, y] for x in choices['kernel_size'] + for y in choices['exp_ratio']] + + num_features = 1280 + + # act_layer = HardSwish + 
act_layer = Swish + + new_arch = [] + # change to child arch_def + for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): + if len(layer_arch) == 1: + new_arch.append(layer_arch) + continue + else: + new_layer = [] + for j, (block_choice, block_arch) in enumerate( + zip(layer_choice, layer_arch)): + kernel_size, exp_ratio = choices_list[block_choice] + elements = block_arch.split('_') + block_arch = block_arch.replace( + elements[2], 'k{}'.format(str(kernel_size))) + block_arch = block_arch.replace( + elements[4], 'e{}'.format(str(exp_ratio))) + new_layer.append(block_arch) + new_arch.append(new_layer) + + model_kwargs = dict( + block_args=decode_arch_def(new_arch), + num_features=num_features, + stem_size=16, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict( + act_layer=nn.ReLU, + gate_fn=hard_sigmoid, + reduce_mid=True, + divisor=8), + **kwargs, + ) + model = ChildNet(**model_kwargs) + return model diff --git a/examples/nas/cream/lib/models/structures/supernet.py b/examples/nas/cream/lib/models/structures/supernet.py new file mode 100644 index 0000000000..f8afe3ee6b --- /dev/null +++ b/examples/nas/cream/lib/models/structures/supernet.py @@ -0,0 +1,206 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +from lib.utils.builder_util import * +from lib.utils.search_structure_supernet import * +from lib.models.builders.build_supernet import * +from lib.utils.op_by_layer_dict import flops_op_dict + +from timm.models.layers import SelectAdaptivePool2d +from timm.models.layers.activations import hard_sigmoid + + +class SuperNet(nn.Module): + + def __init__( + self, + block_args, + choices, + num_classes=1000, + in_chans=3, + stem_size=16, + num_features=1280, + head_bias=True, + channel_multiplier=1.0, + pad_type='', + act_layer=nn.ReLU, + drop_rate=0., + drop_path_rate=0., + slice=4, + se_kwargs=None, + norm_layer=nn.BatchNorm2d, + logger=None, + norm_kwargs=None, + global_pool='avg', + resunit=False, + dil_conv=False, + verbose=False): + super(SuperNet, self).__init__() + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.logger = logger + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d( + self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = SuperNetBuilder( + choices, + channel_multiplier, + 8, + None, + 32, + pad_type, + act_layer, + se_kwargs, + norm_layer, + norm_kwargs, + drop_path_rate, + verbose=verbose, + resunit=resunit, + dil_conv=dil_conv, + logger=self.logger) + self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d( + self._in_chs, + self.num_features, + 1, + padding=pad_type, + bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear( + self.num_features * + self.global_pool.feat_mult(), + self.num_classes) + + self.meta_layer = nn.Linear(self.num_classes * slice, 1) + efficientnet_init_weights(self) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + 
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), + num_classes) if self.num_classes else None + + def forward_features(self, x, architecture): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + for layer, layer_arch in zip(self.blocks, architecture): + for blocks, arch in zip(layer, layer_arch): + if arch == -1: + continue + x = blocks[arch](x) + + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x, architecture): + x = self.forward_features(x, architecture) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + def forward_meta(self, features): + return self.meta_layer(features.view(1, -1)) + + def rand_parameters(self, architecture, meta=False): + for name, param in self.named_parameters(recurse=True): + if 'meta' in name and meta: + yield param + elif 'blocks' not in name and 'meta' not in name and (not meta): + yield param + + if not meta: + for layer, layer_arch in zip(self.blocks, architecture): + for blocks, arch in zip(layer, layer_arch): + if arch == -1: + continue + for name, param in blocks[arch].named_parameters( + recurse=True): + yield param + + +class Classifier(nn.Module): + def __init__(self, num_classes=1000): + super(Classifier, self).__init__() + self.classifier = nn.Linear(num_classes, num_classes) + + def forward(self, x): + return self.classifier(x) + + +def gen_supernet(flops_minimum=0, flops_maximum=600, **kwargs): + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', + 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', + 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', + 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + + sta_num, arch_def, resolution = search_for_layer( + flops_op_dict, arch_def, flops_minimum, flops_maximum) + + if sta_num is None or arch_def is None or resolution is None: + raise ValueError('Invalid FLOPs Settings') + + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + choices=choices, + num_features=num_features, + stem_size=16, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict( + act_layer=nn.ReLU, + gate_fn=hard_sigmoid, + reduce_mid=True, + divisor=8), + **kwargs, + ) + model = SuperNet(**model_kwargs) + return model, sta_num, resolution diff --git a/examples/nas/cream/lib/utils/builder_util.py b/examples/nas/cream/lib/utils/builder_util.py new file mode 100644 index 0000000000..138e08299c --- /dev/null +++ b/examples/nas/cream/lib/utils/builder_util.py @@ -0,0 +1,273 @@ +import math +import torch.nn as nn + +from timm.utils import * +from 
timm.models.layers.activations import Swish
+from timm.models.layers import CondConv2d, get_condconv_initializer
+
+# Explicit imports for helpers used below (re.split / deepcopy), rather than
+# relying on the wildcard import from timm.utils above.
+import re
+from copy import deepcopy
+
+
+def parse_ksize(ss):
+    if ss.isdigit():
+        return int(ss)
+    else:
+        return [int(k) for k in ss.split('.')]
+
+
+def decode_arch_def(
+        arch_def,
+        depth_multiplier=1.0,
+        depth_trunc='ceil',
+        experts_multiplier=1):
+    arch_args = []
+    for stack_idx, block_strings in enumerate(arch_def):
+        assert isinstance(block_strings, list)
+        stack_args = []
+        repeats = []
+        for block_str in block_strings:
+            assert isinstance(block_str, str)
+            ba, rep = decode_block_str(block_str)
+            if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
+                ba['num_experts'] *= experts_multiplier
+            stack_args.append(ba)
+            repeats.append(rep)
+        arch_args.append(
+            scale_stage_depth(
+                stack_args,
+                repeats,
+                depth_multiplier,
+                depth_trunc))
+    return arch_args
+
+
+def modify_block_args(block_args, kernel_size, exp_ratio):
+    block_type = block_args['block_type']
+    if block_type == 'cn':
+        block_args['kernel_size'] = kernel_size
+    elif block_type == 'er':
+        block_args['exp_kernel_size'] = kernel_size
+    else:
+        block_args['dw_kernel_size'] = kernel_size
+
+    if block_type == 'ir' or block_type == 'er':
+        block_args['exp_ratio'] = exp_ratio
+    return block_args
+
+
+def decode_block_str(block_str):
+    """ Decode block definition string
+    Gets a list of block args (dicts) through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
+    All args can exist in any order with the exception of the leading string which
+    is assumed to indicate the block type.
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio
+    n - activation fn ('re', 'r6', 'hs', or 'sw')
+    Args:
+        block_str: a string representation of block arguments.
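+    Example (illustrative, not exhaustive):
+        'ir_r2_k3_s2_e4_c24_se0.25' decodes to an InvertedResidual block
+        repeated twice, with a 3x3 depthwise kernel, stride 2, expansion
+        ratio 4, 24 output channels and a squeeze-excitation ratio of 0.25.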
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they + # grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = nn.ReLU + elif v == 'r6': + value = nn.ReLU6 + elif v == 'sw': + value = Swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be + # used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = parse_ksize(options['p']) if 'p' in options else 1 + # FIXME hack to deal with in_chs issue in TPU def + fake_in_chs = int(options['fc']) if 'fc' in options else 0 + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def scale_stage_depth( + stack_args, + repeats, + depth_multiplier=1.0, + depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as + # long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every + # stage. 
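+        # Worked example (numbers assumed for illustration): with
+        # repeats=[1, 2] and depth_multiplier=1.5, num_repeat=3 scales to
+        # ceil(4.5)=5; the reverse-order loop below then distributes this
+        # as repeats_scaled=[2, 3].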
+        num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
+
+    # Proportionally distribute repeat count scaling to each block definition in the stage.
+    # Allocation is done in reverse as it results in the first block being less likely to be scaled.
+    # The first block makes less sense to repeat in most of the arch definitions.
+    repeats_scaled = []
+    for r in repeats[::-1]:
+        rs = max(1, round((r / num_repeat * num_repeat_scaled)))
+        repeats_scaled.append(rs)
+        num_repeat -= r
+        num_repeat_scaled -= rs
+    repeats_scaled = repeats_scaled[::-1]
+
+    # Apply the calculated scaling to each block arg in the stage
+    sa_scaled = []
+    for ba, rep in zip(stack_args, repeats_scaled):
+        sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
+    return sa_scaled
+
+
+def init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None):
+    """ Weight initialization as per Tensorflow official implementations.
+    Args:
+        m (nn.Module): module to init
+        n (str): module name
+        fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
+    Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
+    * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
+    * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
+    """
+    if isinstance(m, CondConv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        init_weight_fn = get_condconv_initializer(lambda w: w.data.normal_(
+            0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
+        init_weight_fn(m.weight)
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Conv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.BatchNorm2d):
+        # zero-init the gamma of the last BN in each block when requested,
+        # otherwise use the standard BN init
+        if last_bn is not None and n in last_bn:
+            m.weight.data.zero_()
+            m.bias.data.zero_()
+        else:
+            m.weight.data.fill_(1.0)
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Linear):
+        fan_out = m.weight.size(0)  # fan-out
+        fan_in = 0
+        if 'routing_fn' in n:
+            fan_in = m.weight.size(1)
+        init_range = 1.0 / math.sqrt(fan_in + fan_out)
+        m.weight.data.uniform_(-init_range, init_range)
+        m.bias.data.zero_()
+
+
+def efficientnet_init_weights(
+        model: nn.Module,
+        init_fn=None,
+        zero_gamma=False):
+    last_bn = []
+    if zero_gamma:
+        prev_n = ''
+        for n, m in model.named_modules():
+            if isinstance(m, nn.BatchNorm2d):
+                if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]):
+                    last_bn.append(prev_n)
+                prev_n = n
+        last_bn.append(prev_n)
+
+    init_fn = init_fn or init_weight_goog
+    for n, m in model.named_modules():
+        init_fn(m, n, last_bn=last_bn)
diff --git a/examples/nas/cream/lib/utils/flops_table.py b/examples/nas/cream/lib/utils/flops_table.py
new file mode 100644
index 0000000000..4c9e4e457d
--- /dev/null
+++ b/examples/nas/cream/lib/utils/flops_table.py
@@ -0,0 +1,83 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# Written by Hao Du and Houwen Peng
+# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com
+
+import torch
+
+from ptflops import get_model_complexity_info
+
+
+class FlopsEst(object):
+    def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'):
+        self.block_num = len(model.blocks)
+        self.choice_num = len(model.blocks[0])
+        self.flops_dict = {}
+        self.params_dict = {}
+
+        if device == 'cpu':
+            model = model.cpu()
+        else:
+            model = model.cuda()
+
+        self.params_fixed = 0
+        self.flops_fixed = 0
+
+        input = torch.randn(input_shape)
+
+        # stem
+        flops, params = get_model_complexity_info(
+            model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False)
+        self.params_fixed += params / 1e6
+        self.flops_fixed += flops / 1e6
+
+        input = model.conv_stem(input)
+
+        for block_id, block in enumerate(model.blocks):
+            self.flops_dict[block_id] = {}
+            self.params_dict[block_id] = {}
+            for module_id, module in enumerate(block):
+                self.flops_dict[block_id][module_id] = {}
+                self.params_dict[block_id][module_id] = {}
+                for choice_id, choice in enumerate(module):
+                    flops, params = get_model_complexity_info(choice, tuple(
+                        input.shape[1:]), as_strings=False, print_per_layer_stat=False)
+                    # Flops(M)
+                    self.flops_dict[block_id][module_id][choice_id] = flops / 1e6
+                    # Params(M)
+                    self.params_dict[block_id][module_id][choice_id] = params / 1e6
+
+                input = choice(input)
+
+        # global pooling
+        flops, params = get_model_complexity_info(model.global_pool, tuple(
+            input.shape[1:]), as_strings=False, print_per_layer_stat=False)
+        self.params_fixed += params / 1e6
+        self.flops_fixed += flops / 1e6
+
+        input = model.global_pool(input)
+
+        # conv_head
+        flops, params = get_model_complexity_info(model.conv_head, tuple(
+            input.shape[1:]), as_strings=False, print_per_layer_stat=False)
+        self.params_fixed += params / 1e6
+        self.flops_fixed += flops / 1e6
+
+    # return params (M)
+    def get_params(self, arch):
+        params = 0
+        for block_id, block in enumerate(arch):
+            for module_id, choice in enumerate(block):
+                if choice == -1:
+                    continue
+                params += self.params_dict[block_id][module_id][choice]
+        return params + self.params_fixed
+
+    # return flops (M)
+    def get_flops(self, arch):
+        flops = 0
+        for block_id, block in enumerate(arch):
+            for module_id, choice in enumerate(block):
+                if choice == -1:
+                    continue
+                flops += self.flops_dict[block_id][module_id][choice]
+        return flops + self.flops_fixed
diff --git a/examples/nas/cream/lib/utils/op_by_layer_dict.py b/examples/nas/cream/lib/utils/op_by_layer_dict.py
new file mode 100644
index 0000000000..47ca509ce4
--- /dev/null
+++ b/examples/nas/cream/lib/utils/op_by_layer_dict.py
@@ -0,0 +1,42 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# Written by Hao Du and Houwen Peng
+# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com
+
+# This dictionary stores pre-computed FLOPs for each candidate operation in
+# each stage, so that the layer search can look them up quickly:
+# flops_op_dict[which_stage][which_operation] = +# (flops_of_operation_with_stride1, flops_of_operation_with_stride2) + +flops_op_dict = {} +for i in range(5): + flops_op_dict[i] = {} +flops_op_dict[0][0] = (21.828704, 18.820752) +flops_op_dict[0][1] = (32.669328, 28.16048) +flops_op_dict[0][2] = (25.039968, 23.637648) +flops_op_dict[0][3] = (37.486224, 35.385824) +flops_op_dict[0][4] = (29.856864, 30.862992) +flops_op_dict[0][5] = (44.711568, 46.22384) +flops_op_dict[1][0] = (11.808656, 11.86712) +flops_op_dict[1][1] = (17.68624, 17.780848) +flops_op_dict[1][2] = (13.01288, 13.87416) +flops_op_dict[1][3] = (19.492576, 20.791408) +flops_op_dict[1][4] = (14.819216, 16.88472) +flops_op_dict[1][5] = (22.20208, 25.307248) +flops_op_dict[2][0] = (8.198, 10.99632) +flops_op_dict[2][1] = (12.292848, 16.5172) +flops_op_dict[2][2] = (8.69976, 11.99984) +flops_op_dict[2][3] = (13.045488, 18.02248) +flops_op_dict[2][4] = (9.4524, 13.50512) +flops_op_dict[2][5] = (14.174448, 20.2804) +flops_op_dict[3][0] = (12.006112, 15.61632) +flops_op_dict[3][1] = (18.028752, 23.46096) +flops_op_dict[3][2] = (13.009632, 16.820544) +flops_op_dict[3][3] = (19.534032, 25.267296) +flops_op_dict[3][4] = (14.514912, 18.62688) +flops_op_dict[3][5] = (21.791952, 27.9768) +flops_op_dict[4][0] = (11.307456, 15.292416) +flops_op_dict[4][1] = (17.007072, 23.1504) +flops_op_dict[4][2] = (11.608512, 15.894528) +flops_op_dict[4][3] = (17.458656, 24.053568) +flops_op_dict[4][4] = (12.060096, 16.797696) +flops_op_dict[4][5] = (18.136032, 25.40832) \ No newline at end of file diff --git a/examples/nas/cream/lib/utils/search_structure_supernet.py b/examples/nas/cream/lib/utils/search_structure_supernet.py new file mode 100644 index 0000000000..b13491c2c7 --- /dev/null +++ b/examples/nas/cream/lib/utils/search_structure_supernet.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +def search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum): + sta_num = [1, 1, 1, 1, 1] + order = [2, 3, 4, 1, 0, 2, 3, 4, 1, 0] + limits = [3, 3, 3, 2, 2, 4, 4, 4, 4, 4] + size_factor = 224 // 32 + base_min_flops = sum([flops_op_dict[i][0][0] for i in range(5)]) + base_max_flops = sum([flops_op_dict[i][5][0] for i in range(5)]) + + if base_min_flops > flops_maximum: + while base_min_flops > flops_maximum and size_factor >= 2: + size_factor = size_factor - 1 + flops_minimum = flops_minimum * (7. / size_factor) + flops_maximum = flops_maximum * (7. 
/ size_factor) + if size_factor < 2: + return None, None, None + elif base_max_flops < flops_minimum: + cur_ptr = 0 + while base_max_flops < flops_minimum and cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + \ + flops_op_dict[order[cur_ptr]][5][1] + sta_num[order[cur_ptr]] += 1 + if cur_ptr > 7 and base_max_flops < flops_minimum: + return None, None, None + + cur_ptr = 0 + while cur_ptr <= 9: + if sta_num[order[cur_ptr]] >= limits[cur_ptr]: + cur_ptr += 1 + continue + base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] + if base_max_flops <= flops_maximum: + sta_num[order[cur_ptr]] += 1 + else: + break + + arch_def = [item[:i] for i, item in zip([1] + sta_num + [1], arch_def)] + # print(arch_def) + + return sta_num, arch_def, size_factor * 32 diff --git a/examples/nas/cream/lib/utils/util.py b/examples/nas/cream/lib/utils/util.py new file mode 100644 index 0000000000..9324a003cc --- /dev/null +++ b/examples/nas/cream/lib/utils/util.py @@ -0,0 +1,178 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import sys +import argparse +import torch.nn as nn + +from torch import optim as optim +from thop import profile, clever_format + +from timm.utils import * + +from lib.config import cfg + + +def get_path_acc(model, path, val_loader, args, val_iters=50): + prec1_m = AverageMeter() + prec5_m = AverageMeter() + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(val_loader): + if batch_idx >= val_iters: + break + if not args.prefetcher: + input = input.cuda() + target = target.cuda() + + output = model(input, path) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = args.tta + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + torch.cuda.synchronize() + + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + return (prec1_m.avg, prec5_m.avg) + + +def get_logger(file_path): + """ Make python logger """ + log_format = '%(asctime)s | %(message)s' + logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt='%m/%d %I:%M:%S %p') + logger = logging.getLogger('') + + formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') + file_handler = logging.FileHandler(file_path) + file_handler.setFormatter(formatter) + + logger.addHandler(file_handler) + + return logger + + +def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + meta_layer_no_decay = [] + meta_layer_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith( + ".bias") or name in skip_list: + if 'meta_layer' in name: + meta_layer_no_decay.append(param) + else: + no_decay.append(param) + else: + if 'meta_layer' in name: + meta_layer_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, + {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, + {'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr}, + {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr}, + ] 
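+
+# Illustrative usage sketch (not part of the original file): the four groups
+# returned above separate backbone and meta-layer parameters so that the
+# optimizer can use args.lr for the backbone and args.meta_lr for the meta
+# layer, with weight decay disabled for biases and 1-D (norm) parameters, e.g.
+#
+#   param_groups = add_weight_decay_supernet(model, args, weight_decay=1e-5)
+#   optimizer = optim.SGD(param_groups, momentum=args.momentum, nesterov=True)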
+ + +def create_optimizer_supernet(args, model, has_apex, filter_bias_and_bn=True): + opt_lower = args.opt.lower() + weight_decay = args.weight_decay + if 'adamw' in opt_lower or 'radam' in opt_lower: + weight_decay /= args.lr + if weight_decay and filter_bias_and_bn: + parameters = add_weight_decay_supernet(model, args, weight_decay) + weight_decay = 0. + else: + parameters = model.parameters() + + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available( + ), 'APEX and CUDA required for fused optimizers' + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + optimizer = optim.SGD( + parameters, + momentum=args.momentum, + weight_decay=weight_decay, + nesterov=True) + elif opt_lower == 'momentum': + optimizer = optim.SGD( + parameters, + momentum=args.momentum, + weight_decay=weight_decay, + nesterov=False) + elif opt_lower == 'adam': + optimizer = optim.Adam( + parameters, weight_decay=weight_decay, eps=args.opt_eps) + else: + assert False and "Invalid optimizer" + raise ValueError + + return optimizer + + +def convert_lowercase(cfg): + keys = cfg.keys() + lowercase_keys = [key.lower() for key in keys] + values = [cfg.get(key) for key in keys] + for lowercase_key, value in zip(lowercase_keys, values): + cfg.setdefault(lowercase_key, value) + return cfg + + +def parse_config_args(exp_name): + parser = argparse.ArgumentParser(description=exp_name) + parser.add_argument( + '--cfg', + type=str, + default='../experiments/workspace/retrain/retrain.yaml', + help='configuration of cream') + parser.add_argument('--local_rank', type=int, default=0, + help='local_rank') + args = parser.parse_args() + + cfg.merge_from_file(args.cfg) + converted_cfg = convert_lowercase(cfg) + + return args, converted_cfg + + +def get_model_flops_params(model, input_size=(1, 3, 224, 224)): + input = torch.randn(input_size) + macs, params = profile(deepcopy(model), inputs=(input,), verbose=False) + macs, params = clever_format([macs, params], "%.3f") + return macs, params + + +def cross_entropy_loss_with_soft_target(pred, soft_target): + logsoftmax = nn.LogSoftmax() + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + + +def create_supernet_scheduler(cfg, optimizer): + ITERS = cfg.EPOCHS * \ + (1280000 / (cfg.NUM_GPU * cfg.DATASET.BATCH_SIZE)) + lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: ( + cfg.LR - step / ITERS) if step <= ITERS else 0, last_epoch=-1) + return lr_scheduler, cfg.EPOCHS diff --git a/examples/nas/cream/models/builder.py b/examples/nas/cream/models/builder.py deleted file mode 100755 index 477a59d143..0000000000 --- a/examples/nas/cream/models/builder.py +++ /dev/null @@ -1,392 +0,0 @@ -import torch -import logging -import math -import re -from collections.__init__ import OrderedDict -from copy import deepcopy -import torch.nn as nn - -from timm.models.layers import CondConv2d, get_condconv_initializer -from timm.models.layers.activations import HardSwish, Swish -from timm.models.efficientnet_blocks import * - -def _decode_block_str(block_str): - """ Decode block definition string - Gets a list of block arg (dicts) through a string notation of arguments. - E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip - All args can exist in any order with the exception of the leading string which - is assumed to indicate the block type. 
- leading string - block type ( - ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) - r - number of repeat blocks, - k - kernel size, - s - strides (1-9), - e - expansion ratio, - c - output channels, - se - squeeze/excitation ratio - n - activation fn ('re', 'r6', 'hs', or 'sw') - Args: - block_str: a string representation of block arguments. - Returns: - A list of block args (dicts) - Raises: - ValueError: if the string def not properly specified (TODO) - """ - assert isinstance(block_str, str) - ops = block_str.split('_') - block_type = ops[0] # take the block type off the front - ops = ops[1:] - options = {} - noskip = False - for op in ops: - # string options being checked on individual basis, combine if they grow - if op == 'noskip': - noskip = True - elif op.startswith('n'): - # activation fn - key = op[0] - v = op[1:] - if v == 're': - value = nn.ReLU - elif v == 'r6': - value = nn.ReLU6 - elif v == 'sw': - value = Swish - else: - continue - options[key] = value - else: - # all numeric options - splits = re.split(r'(\d.*)', op) - if len(splits) >= 2: - key, value = splits[:2] - options[key] = value - - # if act_layer is None, the model default (passed to model init) will be used - act_layer = options['n'] if 'n' in options else None - exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 - pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 - fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def - - num_repeat = int(options['r']) - # each type of block has different valid arguments, fill accordingly - if block_type == 'ir': - block_args = dict( - block_type=block_type, - dw_kernel_size=_parse_ksize(options['k']), - exp_kernel_size=exp_kernel_size, - pw_kernel_size=pw_kernel_size, - out_chs=int(options['c']), - exp_ratio=float(options['e']), - se_ratio=float(options['se']) if 'se' in options else None, - stride=int(options['s']), - act_layer=act_layer, - noskip=noskip, - ) - if 'cc' in options: - block_args['num_experts'] = int(options['cc']) - elif block_type == 'ds' or block_type == 'dsa': - block_args = dict( - block_type=block_type, - dw_kernel_size=_parse_ksize(options['k']), - pw_kernel_size=pw_kernel_size, - out_chs=int(options['c']), - se_ratio=float(options['se']) if 'se' in options else None, - stride=int(options['s']), - act_layer=act_layer, - pw_act=block_type == 'dsa', - noskip=block_type == 'dsa' or noskip, - ) - elif block_type == 'cn': - block_args = dict( - block_type=block_type, - kernel_size=int(options['k']), - out_chs=int(options['c']), - stride=int(options['s']), - act_layer=act_layer, - ) - else: - assert False, 'Unknown block type (%s)' % block_type - - return block_args, num_repeat - -def modify_block_args(block_args, kernel_size, exp_ratio): - # kernel_size: 3,5,7 - # exp_ratio: 4,6 - block_type = block_args['block_type'] - # each type of block has different valid arguments, fill accordingly - if block_type == 'cn': - block_args['kernel_size'] = kernel_size - elif block_type == 'er': - block_args['exp_kernel_size'] = kernel_size - else: - block_args['dw_kernel_size'] = kernel_size - - if block_type == 'ir' or block_type == 'er': - block_args['exp_ratio'] = exp_ratio - return block_args - -def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): - """ Per-stage depth scaling - Scales the block repeats in each stage. 
This depth scaling impl maintains - compatibility with the EfficientNet scaling method, while allowing sensible - scaling for other models that may have multiple block arg definitions in each stage. - """ - - # We scale the total repeat count for each stage, there may be multiple - # block arg defs per stage so we need to sum. - num_repeat = sum(repeats) - if depth_trunc == 'round': - # Truncating to int by rounding allows stages with few repeats to remain - # proportionally smaller for longer. This is a good choice when stage definitions - # include single repeat stages that we'd prefer to keep that way as long as possible - num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) - else: - # The default for EfficientNet truncates repeats to int via 'ceil'. - # Any multiplier > 1.0 will result in an increased depth for every stage. - num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) - - # Proportionally distribute repeat count scaling to each block definition in the stage. - # Allocation is done in reverse as it results in the first block being less likely to be scaled. - # The first block makes less sense to repeat in most of the arch definitions. - repeats_scaled = [] - for r in repeats[::-1]: - rs = max(1, round((r / num_repeat * num_repeat_scaled))) - repeats_scaled.append(rs) - num_repeat -= r - num_repeat_scaled -= rs - repeats_scaled = repeats_scaled[::-1] - - # Apply the calculated scaling to each block arg in the stage - sa_scaled = [] - for ba, rep in zip(stack_args, repeats_scaled): - sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) - return sa_scaled - -def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): - arch_args = [] - for stack_idx, block_strings in enumerate(arch_def): - assert isinstance(block_strings, list) - stack_args = [] - repeats = [] - for block_str in block_strings: - assert isinstance(block_str, str) - ba, rep = _decode_block_str(block_str) - if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: - ba['num_experts'] *= experts_multiplier - stack_args.append(ba) - repeats.append(rep) - arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) - return arch_args - -class ChildNetBuilder: - """ Build Trunk Blocks - """ - def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, - output_stride=32, pad_type='', act_layer=None, se_kwargs=None, - norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', - verbose=False): - self.channel_multiplier = channel_multiplier - self.channel_divisor = channel_divisor - self.channel_min = channel_min - self.output_stride = output_stride - self.pad_type = pad_type - self.act_layer = act_layer - self.se_kwargs = se_kwargs - self.norm_layer = norm_layer - self.norm_kwargs = norm_kwargs - self.drop_path_rate = drop_path_rate - self.feature_location = feature_location - assert feature_location in ('pre_pwl', 'post_exp', '') - self.verbose = verbose - - # state updated during build, consumed by model - self.in_chs = None - self.features = OrderedDict() - - def _round_channels(self, chs): - return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) - - def _make_block(self, ba, block_idx, block_count): - drop_path_rate = self.drop_path_rate * block_idx / block_count - bt = ba.pop('block_type') - ba['in_chs'] = self.in_chs - ba['out_chs'] = self._round_channels(ba['out_chs']) - if 'fake_in_chs' in ba and ba['fake_in_chs']: - # FIXME this is a hack to 
work around mismatch in origin impl input filters - ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) - ba['norm_layer'] = self.norm_layer - ba['norm_kwargs'] = self.norm_kwargs - ba['pad_type'] = self.pad_type - # block act fn overrides the model default - ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer - assert ba['act_layer'] is not None - if bt == 'ir': - ba['drop_path_rate'] = drop_path_rate - ba['se_kwargs'] = self.se_kwargs - if self.verbose: - logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) - block = InvertedResidual(**ba) - elif bt == 'ds' or bt == 'dsa': - ba['drop_path_rate'] = drop_path_rate - ba['se_kwargs'] = self.se_kwargs - if self.verbose: - logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) - block = DepthwiseSeparableConv(**ba) - elif bt == 'cn': - if self.verbose: - logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba))) - block = ConvBnAct(**ba) - else: - assert False, 'Uknkown block type (%s) while building model.' % bt - self.in_chs = ba['out_chs'] # update in_chs for arg of next block - - return block - - def __call__(self, in_chs, model_block_args): - """ Build the blocks - Args: - in_chs: Number of input-channels passed to first block - model_block_args: A list of lists, outer list defines stages, inner - list contains strings defining block configuration(s) - Return: - List of block stacks (each stack wrapped in nn.Sequential) - """ - if self.verbose: - logging.info('Building model trunk with %d stages...' % len(model_block_args)) - self.in_chs = in_chs - total_block_count = sum([len(x) for x in model_block_args]) - total_block_idx = 0 - current_stride = 2 - current_dilation = 1 - feature_idx = 0 - stages = [] - # outer list of block_args defines the stacks ('stages' by some conventions) - for stage_idx, stage_block_args in enumerate(model_block_args): - last_stack = stage_idx == (len(model_block_args) - 1) - if self.verbose: - logging.info('Stack: {}'.format(stage_idx)) - assert isinstance(stage_block_args, list) - - blocks = [] - # each stack (stage) contains a list of block arguments - for block_idx, block_args in enumerate(stage_block_args): - last_block = block_idx == (len(stage_block_args) - 1) - extract_features = '' # No features extracted - if self.verbose: - logging.info(' Block: {}'.format(block_idx)) - - # Sort out stride, dilation, and feature extraction details - assert block_args['stride'] in (1, 2) - if block_idx >= 1: - # only the first block in any stack can have a stride > 1 - block_args['stride'] = 1 - - do_extract = False - if self.feature_location == 'pre_pwl': - if last_block: - next_stage_idx = stage_idx + 1 - if next_stage_idx >= len(model_block_args): - do_extract = True - else: - do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 - elif self.feature_location == 'post_exp': - if block_args['stride'] > 1 or (last_stack and last_block) : - do_extract = True - if do_extract: - extract_features = self.feature_location - - next_dilation = current_dilation - if block_args['stride'] > 1: - next_output_stride = current_stride * block_args['stride'] - if next_output_stride > self.output_stride: - next_dilation = current_dilation * block_args['stride'] - block_args['stride'] = 1 - if self.verbose: - logging.info(' Converting stride to dilation to maintain output_stride=={}'.format( - self.output_stride)) - else: - current_stride = next_output_stride - block_args['dilation'] = current_dilation - if next_dilation != 
current_dilation: - current_dilation = next_dilation - - # create the block - block = self._make_block(block_args, total_block_idx, total_block_count) - blocks.append(block) - - # stash feature module name and channel info for model feature extraction - if extract_features: - feature_module = block.feature_module(extract_features) - if feature_module: - feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module - feature_channels = block.feature_channels(extract_features) - self.features[feature_idx] = dict( - name=feature_module, - num_chs=feature_channels - ) - feature_idx += 1 - - total_block_idx += 1 # incr global block idx (across all stacks) - stages.append(nn.Sequential(*blocks)) - return stages - -def _init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None): - """ Weight initialization as per Tensorflow official implementations. - Args: - m (nn.Module): module to init - n (str): module name - fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs - Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: - * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py - * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py - """ - if isinstance(m, CondConv2d): - fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - if fix_group_fanout: - fan_out //= m.groups - init_weight_fn = get_condconv_initializer( - lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) - init_weight_fn(m.weight) - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Conv2d): - fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - if fix_group_fanout: - fan_out //= m.groups - m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - if n in last_bn: - m.weight.data.zero_() - m.bias.data.zero_() - else: - m.weight.data.fill_(1.0) - m.bias.data.zero_() - m.weight.data.fill_(1.0) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - fan_out = m.weight.size(0) # fan-out - fan_in = 0 - if 'routing_fn' in n: - fan_in = m.weight.size(1) - init_range = 1.0 / math.sqrt(fan_in + fan_out) - m.weight.data.uniform_(-init_range, init_range) - m.bias.data.zero_() - - -def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False): - last_bn = [] - if zero_gamma: - prev_n = '' - for n, m in model.named_modules(): - if isinstance(m, nn.BatchNorm2d): - if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]): - last_bn.append(prev_n) - prev_n = n - last_bn.append(prev_n) - - init_fn = init_fn or _init_weight_goog - for n, m in model.named_modules(): - init_fn(m, n, last_bn=last_bn) diff --git a/examples/nas/cream/models/hbuilder.py b/examples/nas/cream/models/hbuilder.py deleted file mode 100755 index 3b6268cb81..0000000000 --- a/examples/nas/cream/models/hbuilder.py +++ /dev/null @@ -1,417 +0,0 @@ -import torch -import logging -import math -import re -from collections.__init__ import OrderedDict -from copy import deepcopy -import torch.nn as nn - -from timm.models.layers import CondConv2d, get_condconv_initializer -from timm.models.layers.activations import HardSwish, Swish -from timm.models.efficientnet_blocks import * - -from nni.nas.pytorch import mutables - -def _decode_block_str(block_str): - """ Decode block definition string - Gets a list of block arg (dicts) through a 
string notation of arguments. - E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip - All args can exist in any order with the exception of the leading string which - is assumed to indicate the block type. - leading string - block type ( - ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) - r - number of repeat blocks, - k - kernel size, - s - strides (1-9), - e - expansion ratio, - c - output channels, - se - squeeze/excitation ratio - n - activation fn ('re', 'r6', 'hs', or 'sw') - Args: - block_str: a string representation of block arguments. - Returns: - A list of block args (dicts) - Raises: - ValueError: if the string def not properly specified (TODO) - """ - assert isinstance(block_str, str) - ops = block_str.split('_') - block_type = ops[0] # take the block type off the front - ops = ops[1:] - options = {} - noskip = False - for op in ops: - # string options being checked on individual basis, combine if they grow - if op == 'noskip': - noskip = True - elif op.startswith('n'): - # activation fn - key = op[0] - v = op[1:] - if v == 're': - value = nn.ReLU - elif v == 'r6': - value = nn.ReLU6 - elif v == 'sw': - value = Swish - else: - continue - options[key] = value - else: - # all numeric options - splits = re.split(r'(\d.*)', op) - if len(splits) >= 2: - key, value = splits[:2] - options[key] = value - - # if act_layer is None, the model default (passed to model init) will be used - act_layer = options['n'] if 'n' in options else None - exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 - pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 - fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def - - num_repeat = int(options['r']) - # each type of block has different valid arguments, fill accordingly - if block_type == 'ir': - block_args = dict( - block_type=block_type, - dw_kernel_size=_parse_ksize(options['k']), - exp_kernel_size=exp_kernel_size, - pw_kernel_size=pw_kernel_size, - out_chs=int(options['c']), - exp_ratio=float(options['e']), - se_ratio=float(options['se']) if 'se' in options else None, - stride=int(options['s']), - act_layer=act_layer, - noskip=noskip, - ) - if 'cc' in options: - block_args['num_experts'] = int(options['cc']) - elif block_type == 'ds' or block_type == 'dsa': - block_args = dict( - block_type=block_type, - dw_kernel_size=_parse_ksize(options['k']), - pw_kernel_size=pw_kernel_size, - out_chs=int(options['c']), - se_ratio=float(options['se']) if 'se' in options else None, - stride=int(options['s']), - act_layer=act_layer, - pw_act=block_type == 'dsa', - noskip=block_type == 'dsa' or noskip, - ) - elif block_type == 'er': - block_args = dict( - block_type=block_type, - exp_kernel_size=_parse_ksize(options['k']), - pw_kernel_size=pw_kernel_size, - out_chs=int(options['c']), - exp_ratio=float(options['e']), - fake_in_chs=fake_in_chs, - se_ratio=float(options['se']) if 'se' in options else None, - stride=int(options['s']), - act_layer=act_layer, - noskip=noskip, - ) - elif block_type == 'cn': - block_args = dict( - block_type=block_type, - kernel_size=int(options['k']), - out_chs=int(options['c']), - stride=int(options['s']), - act_layer=act_layer, - ) - else: - assert False, 'Unknown block type (%s)' % block_type - - return block_args, num_repeat - -def modify_block_args(block_args, kernel_size, exp_ratio): - # kernel_size: 3,5,7 - # exp_ratio: 4,6 - block_type = block_args['block_type'] - # each type of block has different valid 
arguments, fill accordingly - if block_type == 'cn': - block_args['kernel_size'] = kernel_size - elif block_type == 'er': - block_args['exp_kernel_size'] = kernel_size - else: - block_args['dw_kernel_size'] = kernel_size - - if block_type == 'ir' or block_type == 'er': - block_args['exp_ratio'] = exp_ratio - return block_args - -def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): - """ Per-stage depth scaling - Scales the block repeats in each stage. This depth scaling impl maintains - compatibility with the EfficientNet scaling method, while allowing sensible - scaling for other models that may have multiple block arg definitions in each stage. - """ - - # We scale the total repeat count for each stage, there may be multiple - # block arg defs per stage so we need to sum. - num_repeat = sum(repeats) - if depth_trunc == 'round': - # Truncating to int by rounding allows stages with few repeats to remain - # proportionally smaller for longer. This is a good choice when stage definitions - # include single repeat stages that we'd prefer to keep that way as long as possible - num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) - else: - # The default for EfficientNet truncates repeats to int via 'ceil'. - # Any multiplier > 1.0 will result in an increased depth for every stage. - num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) - - # Proportionally distribute repeat count scaling to each block definition in the stage. - # Allocation is done in reverse as it results in the first block being less likely to be scaled. - # The first block makes less sense to repeat in most of the arch definitions. - repeats_scaled = [] - for r in repeats[::-1]: - rs = max(1, round((r / num_repeat * num_repeat_scaled))) - repeats_scaled.append(rs) - num_repeat -= r - num_repeat_scaled -= rs - repeats_scaled = repeats_scaled[::-1] - - # Apply the calculated scaling to each block arg in the stage - sa_scaled = [] - for ba, rep in zip(stack_args, repeats_scaled): - sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) - return sa_scaled - - -def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): - arch_args = [] - for stack_idx, block_strings in enumerate(arch_def): - assert isinstance(block_strings, list) - stack_args = [] - repeats = [] - for block_str in block_strings: - assert isinstance(block_str, str) - ba, rep = _decode_block_str(block_str) - if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: - ba['num_experts'] *= experts_multiplier - stack_args.append(ba) - repeats.append(rep) - arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) - return arch_args - - -class SuperNetBuilder: - """ Build Trunk Blocks - """ - def __init__(self, choices, channel_multiplier=1.0, channel_divisor=8, channel_min=None, - output_stride=32, pad_type='', act_layer=None, se_kwargs=None, - norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', - verbose=False, resunit=False, dil_conv=False): - - # dict - # choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} - self.choices = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']] - self.choices_num = len(self.choices)-1 - self.channel_multiplier = channel_multiplier - self.channel_divisor = channel_divisor - self.channel_min = channel_min - self.output_stride = output_stride - self.pad_type = pad_type - self.act_layer = act_layer - self.se_kwargs = se_kwargs - self.norm_layer = norm_layer - 
self.norm_kwargs = norm_kwargs - self.drop_path_rate = drop_path_rate - self.feature_location = feature_location - assert feature_location in ('pre_pwl', 'post_exp', '') - self.verbose = verbose - self.resunit = resunit - self.dil_conv = dil_conv - - # state updated during build, consumed by model - self.in_chs = None - - def _round_channels(self, chs): - return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) - - def _make_block(self, ba, choice_idx, block_idx, block_count, resunit=False, dil_conv=False): - drop_path_rate = self.drop_path_rate * block_idx / block_count - bt = ba.pop('block_type') - ba['in_chs'] = self.in_chs - ba['out_chs'] = self._round_channels(ba['out_chs']) - if 'fake_in_chs' in ba and ba['fake_in_chs']: - # FIXME this is a hack to work around mismatch in origin impl input filters - ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) - ba['norm_layer'] = self.norm_layer - ba['norm_kwargs'] = self.norm_kwargs - ba['pad_type'] = self.pad_type - # block act fn overrides the model default - ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer - assert ba['act_layer'] is not None - if bt == 'ir': - ba['drop_path_rate'] = drop_path_rate - ba['se_kwargs'] = self.se_kwargs - if self.verbose: - logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) - block = InvertedResidual(**ba) - elif bt == 'ds' or bt == 'dsa': - ba['drop_path_rate'] = drop_path_rate - ba['se_kwargs'] = self.se_kwargs - if self.verbose: - logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) - block = DepthwiseSeparableConv(**ba) - elif bt == 'cn': - if self.verbose: - logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba))) - block = ConvBnAct(**ba) - else: - assert False, 'Uknkown block type (%s) while building model.' % bt - if choice_idx == self.choice_num-1: - self.in_chs = ba['out_chs'] # update in_chs for arg of next block - - return block - - def __call__(self, in_chs, model_block_args): - """ Build the blocks - Args: - in_chs: Number of input-channels passed to first block - model_block_args: A list of lists, outer list defines stages, inner - list contains strings defining block configuration(s) - Return: - List of block stacks (each stack wrapped in nn.Sequential) - """ - if self.verbose: - logging.info('Building model trunk with %d stages...' 
% len(model_block_args)) - self.in_chs = in_chs - total_block_count = sum([len(x) for x in model_block_args]) - total_block_idx = 0 - current_stride = 2 - current_dilation = 1 - feature_idx = 0 - stages = [] - # outer list of block_args defines the stacks ('stages' by some conventions) - for stage_idx, stage_block_args in enumerate(model_block_args): - last_stack = stage_idx == (len(model_block_args) - 1) - if self.verbose: - logging.info('Stack: {}'.format(stage_idx)) - assert isinstance(stage_block_args, list) - - # blocks = [] - # each stack (stage) contains a list of block arguments - for block_idx, block_args in enumerate(stage_block_args): - last_block = block_idx == (len(stage_block_args) - 1) - if self.verbose: - logging.info(' Block: {}'.format(block_idx)) - - # Sort out stride, dilation, and feature extraction details - assert block_args['stride'] in (1, 2) - if block_idx >= 1: - # only the first block in any stack can have a stride > 1 - block_args['stride'] = 1 - - next_dilation = current_dilation - if block_args['stride'] > 1: - next_output_stride = current_stride * block_args['stride'] - if next_output_stride > self.output_stride: - next_dilation = current_dilation * block_args['stride'] - block_args['stride'] = 1 - if self.verbose: - logging.info(' Converting stride to dilation to maintain output_stride=={}'.format( - self.output_stride)) - else: - current_stride = next_output_stride - block_args['dilation'] = current_dilation - if next_dilation != current_dilation: - current_dilation = next_dilation - - - if stage_idx==0 or stage_idx==6: - self.choice_num = 1 - else: - self.choice_num = len(self.choices) - - if self.dil_conv: - self.choice_num += 2 - - choice_blocks = [] - block_args_copy = deepcopy(block_args) - if self.choice_num == 1: - # create the block - block = self._make_block(block_args, 0, total_block_idx, total_block_count) - choice_blocks.append(block) - else: - for choice_idx, choice in enumerate(self.choices): - # create the block - block_args = deepcopy(block_args_copy) - block_args = modify_block_args(block_args, choice[0], choice[1]) - block = self._make_block(block_args, choice_idx, total_block_idx, total_block_count) - choice_blocks.append(block) - if self.dil_conv: - block_args = deepcopy(block_args_copy) - block_args = modify_block_args(block_args, 3, 0) - block = self._make_block(block_args, self.choice_num - 2, total_block_idx, total_block_count, - resunit=self.resunit, dil_conv=self.dil_conv) - choice_blocks.append(block) - - block_args = deepcopy(block_args_copy) - block_args = modify_block_args(block_args, 5, 0) - block = self._make_block(block_args, self.choice_num - 1, total_block_idx, total_block_count, - resunit=self.resunit, dil_conv=self.dil_conv) - choice_blocks.append(block) - - if self.resunit: - from models.resunit import get_Bottleneck - block = get_Bottleneck(block.conv_pw.in_channels, - block.conv_pwl.out_channels, - block.conv_dw.stride[0]) - choice_blocks.append(block) - - choice_block = mutables.LayerChoice(choice_blocks) - stages.append(choice_block) - # create the block - # block = self._make_block(block_args, total_block_idx, total_block_count) - total_block_idx += 1 # incr global block idx (across all stacks) - - # stages.append(blocks) - return stages - - -def _init_weight_goog(m, n='', fix_group_fanout=True): - """ Weight initialization as per Tensorflow official implementations. 
- Args: - m (nn.Module): module to init - n (str): module name - fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs - Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: - * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py - * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py - """ - if isinstance(m, CondConv2d): - fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - if fix_group_fanout: - fan_out //= m.groups - init_weight_fn = get_condconv_initializer( - lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) - init_weight_fn(m.weight) - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Conv2d): - fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - if fix_group_fanout: - fan_out //= m.groups - m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1.0) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - fan_out = m.weight.size(0) # fan-out - fan_in = 0 - if 'routing_fn' in n: - fan_in = m.weight.size(1) - init_range = 1.0 / math.sqrt(fan_in + fan_out) - m.weight.data.uniform_(-init_range, init_range) - m.bias.data.zero_() - - -def efficientnet_init_weights(model: nn.Module, init_fn=None): - init_fn = init_fn or _init_weight_goog - for n, m in model.named_modules(): - init_fn(m, n) diff --git a/examples/nas/cream/models/hypernet.py b/examples/nas/cream/models/hypernet.py deleted file mode 100755 index 1e47775a04..0000000000 --- a/examples/nas/cream/models/hypernet.py +++ /dev/null @@ -1,307 +0,0 @@ -import torch -import torch.nn as nn -from torch.nn import functional as F - -from nni.nas.pytorch import mutables -from models.hbuilder import * - -DEFAULT_CROP_PCT = 0.875 -IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) -IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) - - -def _cfg(url='', **kwargs): - return { - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv_stem', 'classifier': 'classifier', - **kwargs - } - -_DEBUG = False - - -class SuperNet(nn.Module): - - def __init__(self, block_args, choices, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, - head_bias=True, - channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., slice=4, - se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', resunit=False, - dil_conv=False): - super(SuperNet, self).__init__() - - self.num_classes = num_classes - self.num_features = num_features - self.drop_rate = drop_rate - self._in_chs = in_chans - - # Stem - stem_size = round_channels(stem_size, channel_multiplier) - self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) - self.bn1 = norm_layer(stem_size, **norm_kwargs) - self.act1 = act_layer(inplace=True) - self._in_chs = stem_size - - # Middle stages (IR/ER/DS Blocks) - builder = SuperNetBuilder( - choices, channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, - norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG, resunit=resunit, dil_conv=dil_conv) - # self.blocks = nn.ModuleList(*builder(self._in_chs, block_args)) - blocks = builder(self._in_chs, block_args) - self.blocks = 
nn.Sequential(*blocks) - self._in_chs = builder.in_chs - - # Head + Pooling - self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) - self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) - self.act2 = act_layer(inplace=True) - - # Classifier - self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) - - self.meta_layer = nn.Linear(self.num_classes * slice, 1) - efficientnet_init_weights(self) - - def get_classifier(self): - return self.classifier - - def reset_classifier(self, num_classes, global_pool='avg'): - self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) - self.num_classes = num_classes - self.classifier = nn.Linear( - self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None - - def forward_features(self, x, cand): - # architecture = [[0], [], [], [], [], [0]] - x = self.conv_stem(x) - x = self.bn1(x) - x = self.act1(x) - if cand is not None: - pass # x = self.blocks(x) - else: - x = self.blocks(x) - x = self.global_pool(x) - x = self.conv_head(x) - x = self.act2(x) - return x - - def forward(self, x, cand=None): - x = self.forward_features(x, cand) - x = x.flatten(1) - if self.drop_rate > 0.: - x = F.dropout(x, p=self.drop_rate, training=self.training) - return self.classifier(x) - - def forward_meta(self, features): - return self.meta_layer(features.view(1, -1)) - - def rand_parameters(self, architecture, meta=False): - for name, param in self.named_parameters(recurse=True): - if 'meta' in name and meta: - yield param - elif 'blocks' not in name and 'meta' not in name and (not meta): - yield param - - if not meta: - for layer, layer_arch in zip(self.blocks, architecture.keys()): - for choice_idx, choice in enumerate(architecture[layer_arch]): - if choice: - for name, param in layer[choice_idx].named_parameters(recurse=True): - yield param - - -def search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum): - sta_num = [1, 1, 1, 1, 1] - order = [2, 3, 4, 1, 0, 2, 3, 4, 1, 0] - limits = [3, 3, 3, 2, 2, 4, 4, 4, 4, 4] - size_factor = 7 - base_min_flops = sum([flops_op_dict[i][0][0] for i in range(5)]) - base_max_flops = sum([flops_op_dict[i][5][0] for i in range(5)]) - - - if base_min_flops > flops_maximum: - while base_min_flops > flops_maximum and size_factor >= 2: - size_factor = size_factor - 1 - flops_minimum = flops_minimum * (7. / size_factor) - flops_maximum = flops_maximum * (7. 
/ size_factor) - if size_factor < 2: - return None, None, None - elif base_max_flops < flops_minimum: - cur_ptr = 0 - while base_max_flops < flops_minimum and cur_ptr <= 9: - if sta_num[order[cur_ptr]] >= limits[cur_ptr]: - cur_ptr += 1 - continue - base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] - sta_num[order[cur_ptr]] += 1 - if cur_ptr > 7 and base_max_flops < flops_minimum: - return None, None, None - - cur_ptr = 0 - while cur_ptr <= 9: - if sta_num[order[cur_ptr]] >= limits[cur_ptr]: - cur_ptr += 1 - continue - base_max_flops = base_max_flops + flops_op_dict[order[cur_ptr]][5][1] - if base_max_flops <= flops_maximum: - sta_num[order[cur_ptr]] += 1 - else: - break - - arch_def = [item[:i] for i, item in zip([1]+sta_num+[1], arch_def)] - # print(arch_def) - - return sta_num, arch_def, size_factor - -def _gen_supernet(flops_minimum=0, flops_maximum=600, **kwargs): - choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} - - num_features = 1280 - - # act_layer = HardSwish - act_layer = Swish - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', - 'ir_r1_k3_s1_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', - 'ir_r2_k3_s1_e4_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', - 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - - flops_op_dict = {} - for i in range(5): - flops_op_dict[i] = {} - flops_op_dict[0][0] = (21.828704, 18.820752) - flops_op_dict[0][1] = (32.669328, 28.16048) - flops_op_dict[0][2] = (25.039968, 23.637648) - flops_op_dict[0][3] = (37.486224, 35.385824) - flops_op_dict[0][4] = (29.856864, 30.862992) - flops_op_dict[0][5] = (44.711568, 46.22384) - flops_op_dict[1][0] = (11.808656, 11.86712) - flops_op_dict[1][1] = (17.68624, 17.780848) - flops_op_dict[1][2] = (13.01288, 13.87416) - flops_op_dict[1][3] = (19.492576, 20.791408) - flops_op_dict[1][4] = (14.819216, 16.88472) - flops_op_dict[1][5] = (22.20208, 25.307248) - flops_op_dict[2][0] = (8.198, 10.99632) - flops_op_dict[2][1] = (12.292848, 16.5172) - flops_op_dict[2][2] = (8.69976, 11.99984) - flops_op_dict[2][3] = (13.045488, 18.02248) - flops_op_dict[2][4] = (9.4524, 13.50512) - flops_op_dict[2][5] = (14.174448, 20.2804) - flops_op_dict[3][0] = (12.006112, 15.61632) - flops_op_dict[3][1] = (18.028752, 23.46096) - flops_op_dict[3][2] = (13.009632, 16.820544) - flops_op_dict[3][3] = (19.534032, 25.267296) - flops_op_dict[3][4] = (14.514912, 18.62688) - flops_op_dict[3][5] = (21.791952, 27.9768) - flops_op_dict[4][0] = (11.307456, 15.292416) - flops_op_dict[4][1] = (17.007072, 23.1504) - flops_op_dict[4][2] = (11.608512, 15.894528) - flops_op_dict[4][3] = (17.458656, 24.053568) - flops_op_dict[4][4] = (12.060096, 16.797696) - flops_op_dict[4][5] = (18.136032, 25.40832) - - sta_num, arch_def, size_factor = search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum) - - if sta_num is None or arch_def is None or size_factor is None: - raise ValueError('Invalid FLOPs Settings') - - 
model_kwargs = dict( - block_args=decode_arch_def(arch_def), - choices=choices, - num_features=num_features, - stem_size=16, - # channel_multiplier=channel_multiplier, - norm_kwargs=resolve_bn_args(kwargs), - act_layer=act_layer, - se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), - **kwargs, - ) - model = SuperNet(**model_kwargs) - return model, sta_num, size_factor - - -class Classifier(nn.Module): - def __init__(self, num_classes=1000): - super(Classifier, self).__init__() - self.classifier = nn.Linear(num_classes, num_classes) - - def forward(self, x): - return self.classifier(x) - - -if __name__ == '__main__': - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', - 'ir_r1_k3_s1_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', - 'ir_r2_k3_s1_e4_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', - 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - - flops_op_dict = {} - for i in range(5): - flops_op_dict[i] = {} - flops_op_dict[0][0] = (21.828704, 18.820752) - flops_op_dict[0][1] = (32.669328, 28.16048) - flops_op_dict[0][2] = (25.039968, 23.637648) - flops_op_dict[0][3] = (37.486224, 35.385824) - flops_op_dict[0][4] = (29.856864, 30.862992) - flops_op_dict[0][5] = (44.711568, 46.22384) - flops_op_dict[1][0] = (11.808656, 11.86712) - flops_op_dict[1][1] = (17.68624, 17.780848) - flops_op_dict[1][2] = (13.01288, 13.87416) - flops_op_dict[1][3] = (19.492576, 20.791408) - flops_op_dict[1][4] = (14.819216, 16.88472) - flops_op_dict[1][5] = (22.20208, 25.307248) - flops_op_dict[2][0] = (8.198, 10.99632) - flops_op_dict[2][1] = (12.292848, 16.5172) - flops_op_dict[2][2] = (8.69976, 11.99984) - flops_op_dict[2][3] = (13.045488, 18.02248) - flops_op_dict[2][4] = (9.4524, 13.50512) - flops_op_dict[2][5] = (14.174448, 20.2804) - flops_op_dict[3][0] = (12.006112, 15.61632) - flops_op_dict[3][1] = (18.028752, 23.46096) - flops_op_dict[3][2] = (13.009632, 16.820544) - flops_op_dict[3][3] = (19.534032, 25.267296) - flops_op_dict[3][4] = (14.514912, 18.62688) - flops_op_dict[3][5] = (21.791952, 27.9768) - flops_op_dict[4][0] = (11.307456, 15.292416) - flops_op_dict[4][1] = (17.007072, 23.1504) - flops_op_dict[4][2] = (11.608512, 15.894528) - flops_op_dict[4][3] = (17.458656, 24.053568) - flops_op_dict[4][4] = (12.060096, 16.797696) - flops_op_dict[4][5] = (18.136032, 25.40832) - - sta_num, arch_def, size_factor = search_for_layer(flops_op_dict, arch_def, 0, 20) - print(sta_num, size_factor) diff --git a/examples/nas/cream/models/model.py b/examples/nas/cream/models/model.py deleted file mode 100755 index b665e01545..0000000000 --- a/examples/nas/cream/models/model.py +++ /dev/null @@ -1,159 +0,0 @@ -import torch -import torch.nn as nn -from torch.nn import functional as F - -from nni.nas.pytorch import mutables - -from models.builder import * - -DEFAULT_CROP_PCT = 0.875 -IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) -IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) - 
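# The architecture strings used throughout these files ('ir_r1_k3_s2_e4_c24_se0.25', ...)
# appear to follow the timm / EfficientNet-builder convention: a block-type prefix
# (ds = depthwise-separable, ir = inverted residual, er = edge residual, cn = plain conv)
# followed by r<repeat>, k<kernel>, s<stride>, e<expansion>, c<channels>, se<SE ratio>.
# decode_arch_def() from models.builder is the real parser; the stand-alone sketch below
# only illustrates the encoding and ignores the extra options the real parser supports.
def parse_block_string(block_str):
    parts = block_str.split('_')
    decoded = {'block_type': parts[0]}
    for part in parts[1:]:
        key = ''.join(ch for ch in part if ch.isalpha())   # e.g. 'se' from 'se0.25'
        value = part[len(key):]
        decoded[key] = float(value) if '.' in value else int(value)
    return decoded

print(parse_block_string('ir_r1_k3_s2_e4_c24_se0.25'))
# -> {'block_type': 'ir', 'r': 1, 'k': 3, 's': 2, 'e': 4, 'c': 24, 'se': 0.25}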
-def _cfg(url='', **kwargs): - return { - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv_stem', 'classifier': 'classifier', - **kwargs - } - - -_DEBUG = False - - -class ChildNet(nn.Module): - - def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, - channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., - se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False): - super(ChildNet, self).__init__() - - self.num_classes = num_classes - self.num_features = num_features - self.drop_rate = drop_rate - self._in_chs = in_chans - self.pool_bn = pool_bn - - # Stem - stem_size = round_channels(stem_size, channel_multiplier) - self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) - self.bn1 = norm_layer(stem_size, **norm_kwargs) - self.act1 = act_layer(inplace=True) - self._in_chs = stem_size - - # Middle stages (IR/ER/DS Blocks) - builder = ChildNetBuilder( - channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, - norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG) - self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) - # self.blocks = builder(self._in_chs, block_args) - self._in_chs = builder.in_chs - - # Head + Pooling - self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) - self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) - self.act2 = act_layer(inplace=True) - - # Classifier - self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) - - if pool_bn: - self.pool_bn = nn.BatchNorm1d(1) - - efficientnet_init_weights(self, zero_gamma=zero_gamma) - - def get_classifier(self): - return self.classifier - - def reset_classifier(self, num_classes, global_pool='avg'): - self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) - self.num_classes = num_classes - self.classifier = nn.Linear( - self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None - - def forward_features(self, x): - # architecture = [[0], [], [], [], [], [0]] - x = self.conv_stem(x) - x = self.bn1(x) - x = self.act1(x) - x = self.blocks(x) - x = self.global_pool(x) - x = self.conv_head(x) - x = self.act2(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = x.flatten(1) - if self.drop_rate > 0.: - x = F.dropout(x, p=self.drop_rate, training=self.training) - x = self.classifier(x) - if self.pool_bn: - x = torch.unsqueeze(x, 1) - x = self.pool_bn(x) - x = torch.squeeze(x) - return x - - -def modify_block_args(block_args, kernel_size, exp_ratio): - # kernel_size: 3,5,7 - # exp_ratio: 4,6 - block_type = block_args['block_type'] - # each type of block has different valid arguments, fill accordingly - if block_type == 'cn': - block_args['kernel_size'] = kernel_size - elif block_type == 'er': - block_args['exp_kernel_size'] = kernel_size - else: - block_args['dw_kernel_size'] = kernel_size - - if block_type == 'ir' or block_type == 'er': - block_args['exp_ratio'] = exp_ratio - return block_args - - -def _gen_childnet(arch_list, arch_def, **kwargs): - # arch_list = [[0], [], [], [], [], [0]] - choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} - choices_list = [[x,y] for x in choices['kernel_size'] for y in 
choices['exp_ratio']] - - num_features = 1280 - - # act_layer = HardSwish - act_layer = Swish - - new_arch = [] - # change to child arch_def - for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): - if len(layer_arch) == 1: - new_arch.append(layer_arch) - continue - else: - new_layer = [] - for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)): - kernel_size, exp_ratio = choices_list[block_choice] - elements = block_arch.split('_') - block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size))) - block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio))) - new_layer.append(block_arch) - new_arch.append(new_layer) - - model_kwargs = dict( - block_args=decode_arch_def(new_arch), - num_features=num_features, - stem_size=16, - # channel_multiplier=channel_multiplier, - norm_kwargs=resolve_bn_args(kwargs), - act_layer=act_layer, - se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), - **kwargs, - ) - model = ChildNet(**model_kwargs) - return model - -# arch_list = [[0], [3, 2, 3, 3], [3, 2, 3, 1], [3, 0, 3, 2], [3, 3, 3, 3], [3, 3, 3, 3], [0]] -# model = _gen_childnet(arch_list, zero_gamma=True) - - diff --git a/examples/nas/cream/requirements.txt b/examples/nas/cream/requirements old mode 100755 new mode 100644 similarity index 79% rename from examples/nas/cream/requirements.txt rename to examples/nas/cream/requirements index 03298d97c5..5ddae72e4c --- a/examples/nas/cream/requirements.txt +++ b/examples/nas/cream/requirements @@ -7,5 +7,6 @@ git+https://github.com/sovrasov/flops-counter.pytorch.git pillow==6.1.0 torch==1.2 timm==0.1.20 -git+https://github.com/Tramac/torchscope.git +tensorboardx==1.2 tensorboard +future \ No newline at end of file diff --git a/examples/nas/cream/run.sh b/examples/nas/cream/run.sh deleted file mode 100755 index c91122daa4..0000000000 --- a/examples/nas/cream/run.sh +++ /dev/null @@ -1,6 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_train.sh 8 \ - --data ./data/imagenet/ --sched spos_linear \ - --pool_size 10 --meta_sta_epoch 20 --update_iter 200 \ - --epochs 120 --batch-size 128 --warmup-epochs 0 \ - --lr 0.5 --opt-eps 0.001 \ - --color-jitter 0.06 --drop 0. 
-j 8 --num-classes 1000 --flops_minimum 0 --flops_maximum 600 diff --git a/examples/nas/cream/supernet.py b/examples/nas/cream/supernet.py deleted file mode 100644 index 87d26ae910..0000000000 --- a/examples/nas/cream/supernet.py +++ /dev/null @@ -1,389 +0,0 @@ -import os -import argparse -import time -import numpy as np -import logging -import torch.nn as nn -from datetime import datetime -from copy import deepcopy - -try: - from apex import amp - from apex.parallel import DistributedDataParallel as DDP - from apex.parallel import convert_syncbn_model - has_apex = True -except ImportError: - from torch.nn.parallel import DistributedDataParallel as DDP - has_apex = False - -from timm.data import Dataset, create_loader, resolve_data_config, FastCollateMixup, DatasetTar -from timm.models import create_model, resume_checkpoint - -from timm.utils import * -from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy -from timm.scheduler import create_scheduler - -from models.hypernet import _gen_supernet -from flops_table import LatencyEst - -from nni.nas.pytorch.cream import CreamSupernetTrainer -from nni.nas.pytorch.random import RandomMutator - -logger = logging.getLogger("nni.cream.supernet") - - -def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): - decay = [] - no_decay = [] - meta_layer_no_decay = [] - meta_layer_decay = [] - for name, param in model.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: - if 'meta_layer' in name: - meta_layer_no_decay.append(param) - else: - no_decay.append(param) - else: - if 'meta_layer' in name: - meta_layer_decay.append(param) - else: - decay.append(param) - return [ - {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, - {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, - {'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr}, - {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr}, - ] - -def create_optimizer_supernet(args, model, filter_bias_and_bn=True): - from torch import optim as optim - opt_lower = args.opt.lower() - weight_decay = args.weight_decay - if 'adamw' in opt_lower or 'radam' in opt_lower: - # Compensate for the way current AdamW and RAdam optimizers apply LR to the weight-decay - # I don't believe they follow the paper or original Torch7 impl which schedules weight - # decay based on the ratio of current_lr/initial_lr - weight_decay /= args.lr - if weight_decay and filter_bias_and_bn: - parameters = add_weight_decay_supernet(model, args, weight_decay) - weight_decay = 0. 
- else: - parameters = model.parameters() - - if 'fused' in opt_lower: - assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' - - opt_split = opt_lower.split('_') - opt_lower = opt_split[-1] - if opt_lower == 'sgd' or opt_lower == 'nesterov': - optimizer = optim.SGD( - parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) - elif opt_lower == 'momentum': - optimizer = optim.SGD( - parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) - elif opt_lower == 'adam': - optimizer = optim.Adam( - parameters, weight_decay=weight_decay, eps=args.opt_eps) - else: - assert False and "Invalid optimizer" - raise ValueError - - return optimizer - -def main(): - parser = argparse.ArgumentParser(description='Training') - # Dataset / Model parameters - parser.add_argument('--data', metavar='DIR', - help='path to dataset') - parser.add_argument('--model', default='hypernet', type=str, metavar='MODEL', - help='Name of model to train (default: "countception"') - parser.add_argument('--pretrained', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', - help='Initialize model from this checkpoint (default: none)') - parser.add_argument('--resume', default='', type=str, metavar='PATH', - help='Resume full model and optimizer state from checkpoint (default: none)') - parser.add_argument('--num-classes', type=int, default=1000, metavar='N', - help='number of label classes (default: 1000)') - parser.add_argument('--gp', default='avg', type=str, metavar='POOL', - help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")') - parser.add_argument('--img-size', type=int, default=None, metavar='N', - help='Image patch size (default: None => model default)') - parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') - parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') - parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') - parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N', - help='input batch size for training (default: 32)') - parser.add_argument('--drop', type=float, default=0.0, metavar='DROP', - help='Dropout rate (default: 0.)') - # Optimizer parameters - parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', - help='Optimizer (default: "sgd"') - parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', - help='Optimizer Epsilon (default: 1e-8)') - parser.add_argument('--momentum', type=float, default=0.9, metavar='M', - help='SGD momentum (default: 0.9)') - parser.add_argument('--weight-decay', type=float, default=0.0001, - help='weight decay (default: 0.0001)') - # Learning rate schedule parameters - parser.add_argument('--sched', default='spos_linear', type=str, metavar='SCHEDULER', - help='LR scheduler (default: "step"') - parser.add_argument('--lr', type=float, default=0.01, metavar='LR', - help='learning rate (default: 0.01)') - parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', - help='warmup learning rate (default: 0.0001)') - parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', - help='lower lr bound for cyclic schedulers that 
hit 0 (1e-5)') - parser.add_argument('--epochs', type=int, default=120, metavar='N', - help='number of epochs to train (default: 2)') - parser.add_argument('--start-epoch', default=None, type=int, metavar='N', - help='manual epoch number (useful on restarts)') - parser.add_argument('--decay-epochs', type=int, default=15, metavar='N', - help='epoch interval to decay LR') - parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', - help='epochs to warmup LR, if scheduler supports') - parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', - help='epochs to cooldown LR at min_lr, after cyclic schedule ends') - parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', - help='LR decay rate (default: 0.1)') - parser.add_argument('--grad', type=int, default=1, metavar='RATE', - help='LR decay rate (default: 0.1)') - # Augmentation parameters - parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', - help='Color jitter factor (default: 0.4)') - parser.add_argument('--reprob', type=float, default=0., metavar='PCT', - help='Random erase prob (default: 0.)') - parser.add_argument('--remode', type=str, default='const', - help='Random erase mode (default: "const")') - parser.add_argument('--mixup', type=float, default=0.0, - help='mixup alpha, mixup enabled if > 0. (default: 0.)') - parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', - help='turn off mixup after this epoch, disabled if 0 (default: 0)') - parser.add_argument('--smoothing', type=float, default=0.1, - help='label smoothing (default: 0.1)') - # Batch norm parameters (only works with gen_efficientnet based models currently) - parser.add_argument('--bn-tf', action='store_true', default=False, - help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') - parser.add_argument('--bn-momentum', type=float, default=None, - help='BatchNorm momentum override (if not None)') - parser.add_argument('--bn-eps', type=float, default=None, - help='BatchNorm epsilon override (if not None)') - # Model Exponential Moving Average - parser.add_argument('--model-ema', action='store_true', default=False, - help='Enable tracking moving average of model weights') - parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, - help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') - parser.add_argument('--model-ema-decay', type=float, default=0.9998, - help='decay factor for model weights moving average (default: 0.9998)') - parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', - help='learning rate noise on/off epoch percentages') - parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', - help='learning rate noise limit percent (default: 0.67)') - parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', - help='learning rate noise std-dev (default: 1.0)') - # Misc - parser.add_argument('--seed', type=int, default=42, metavar='S', - help='random seed (default: 42)') - parser.add_argument('--log-interval', type=int, default=50, metavar='N', - help='how many batches to wait before logging training status') - parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', - help='how many training processes to use (default: 1)') - parser.add_argument('--num-gpu', type=int, default=1, - help='Number of GPUS to use') - parser.add_argument("--local_rank", default=0, type=int) - parser.add_argument("--update_iter", default=1, type=int) - parser.add_argument("--slice", default=4, type=int) - parser.add_argument("--pool_size", default=10, type=int) - parser.add_argument('--resunit', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--dil_conv', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--tiny', action='store_true', default=False) - parser.add_argument('--flops_maximum', default=600, type=int) - parser.add_argument('--flops_minimum', default=0, type=int) - parser.add_argument('--pick_method', default='meta', type=str) - parser.add_argument('--meta_lr', default=1e-2, type=float) - parser.add_argument('--meta_sta_epoch', default=-1, type=int) - parser.add_argument('--how_to_prob', default='pre_prob', type=str) - parser.add_argument('--pre_prob', default=(0.05, 0.2, 0.05, 0.5, 0.05, 0.15), type=tuple) - args = parser.parse_args() - - seed = args.seed - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - torch.backends.cudnn.deterministic = True - - args.distributed = False - if 'WORLD_SIZE' in os.environ: - args.distributed = int(os.environ['WORLD_SIZE']) > 1 - if args.distributed and args.num_gpu > 1: - logger.warning( - 'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') - args.num_gpu = 1 - - args.device = 'cuda:0' - args.world_size = 1 - args.rank = 0 # global rank - if args.distributed: - args.num_gpu = 1 - args.device = 'cuda:%d' % args.local_rank - torch.cuda.set_device(args.local_rank) - import random - port = random.randint(0, 50000) - torch.distributed.init_process_group(backend='nccl', init_method='env://') # tcp://127.0.0.1:{}'.format(port), rank=args.local_rank, world_size=8) - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - assert args.rank >= 0 - - if args.distributed: - logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' - % (args.rank, args.world_size)) - else: - logging.info('Training with a single process on %d GPUs.' 
% args.num_gpu) - - model, sta_num, size_factor = _gen_supernet( - flops_minimum=args.flops_minimum, - flops_maximum=args.flops_maximum, - num_classes=args.num_classes, - drop_rate=args.drop, - global_pool=args.gp, - resunit=args.resunit, - dil_conv=args.dil_conv, - slice=args.slice) - - if args.local_rank == 0: - print("Model Searched Using FLOPs {}".format(size_factor * 32)) - - data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) - if args.local_rank == 0: - logger.info(args) - - choice_num = 6 - if args.resunit: - choice_num += 1 - if args.dil_conv: - choice_num += 2 - - if args.local_rank == 0: - logger.info("Choice_num: {}".format(choice_num)) - - model_est = LatencyEst(model) - - if args.local_rank == 0: - logger.info('Model %s created, param count: %d' % - (args.model, sum([m.numel() for m in model.parameters()]))) - - # data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) - - # optionally resume from a checkpoint - optimizer_state = None - resume_epoch = None - if args.resume: - optimizer_state, resume_epoch = resume_checkpoint(model, args.resume) - - if args.num_gpu > 1: - if args.amp: - logging.warning( - 'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') - args.amp = False - model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() - else: - model.cuda() - - optimizer = create_optimizer_supernet(args, model) - if optimizer_state is not None: - optimizer.load_state_dict(optimizer_state['optimizer']) - - if args.distributed: - if has_apex: - model = DDP(model, delay_allreduce=True) - else: - if args.local_rank == 0: - logger.info("Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.") - model = DDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1 - # NOTE: EMA model does not need to be wrapped by DDP - - lr_scheduler, num_epochs = create_scheduler(args, optimizer) - - start_epoch = 0 - if args.start_epoch is not None: - # a specified start_epoch will always override the resume epoch - start_epoch = args.start_epoch - elif resume_epoch is not None: - start_epoch = resume_epoch - if start_epoch > 0: - lr_scheduler.step(start_epoch) - - if args.local_rank == 0: - logger.info('Scheduled epochs: {}'.format(num_epochs)) - - if args.tiny: - from dataset.tiny_imagenet import get_newimagenet - [loader_train, loader_eval], [train_sampler, test_sampler] = get_newimagenet(args.data, args.batch_size) - else: - train_dir = os.path.join(args.data, 'train') - if not os.path.exists(train_dir): - logger.error('Training folder does not exist at: {}'.format(train_dir)) - exit(1) - dataset_train = Dataset(train_dir) - - collate_fn = None - - loader_train = create_loader( - dataset_train, - input_size=data_config['input_size'], - batch_size=args.batch_size, - is_training=True, - re_prob=args.reprob, - re_mode=args.remode, - color_jitter=args.color_jitter, - interpolation='random', # FIXME cleanly resolve this? 
data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - distributed=args.distributed, - collate_fn=collate_fn, - ) - - eval_dir = os.path.join(args.data, 'val') - if not os.path.isdir(eval_dir): - logger.error('Validation folder does not exist at: {}'.format(eval_dir)) - exit(1) - dataset_eval = Dataset(eval_dir) - - loader_eval = create_loader( - dataset_eval, - input_size=data_config['input_size'], - batch_size=4 * args.batch_size, - is_training=False, - interpolation=data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - distributed=args.distributed, - ) - - criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda() - val_loss = nn.CrossEntropyLoss().cuda() - - mutator = RandomMutator(model) - - trainer = CreamSupernetTrainer(model, criterion, optimizer, args.epochs, - train_loader=loader_train, valid_loader=loader_eval, - mutator=mutator, batch_size=args.batch_size, - log_frequency=args.log_interval, est=model_est, meta_sta_epoch=args.meta_sta_epoch, - update_iter=args.update_iter, slices=args.slice, pool_size=args.pool_size, - pick_method=args.pick_method, lr_scheduler=lr_scheduler, distributed=args.distributed, - local_rank=args.local_rank, val_loss=val_loss) - trainer.train() - - -if __name__ == '__main__': - main() - diff --git a/examples/nas/cream/test.py b/examples/nas/cream/test.py deleted file mode 100755 index 8ee79d5381..0000000000 --- a/examples/nas/cream/test.py +++ /dev/null @@ -1,482 +0,0 @@ -import os -import argparse -import time -import numpy as np -import logging -import torch.nn as nn -from datetime import datetime -from copy import deepcopy - -try: - from apex import amp - from apex.parallel import DistributedDataParallel as DDP - from apex.parallel import convert_syncbn_model - has_apex = True -except ImportError: - from torch.nn.parallel import DistributedDataParallel as DDP - has_apex = False - -from dataset import Dataset, create_loader, resolve_data_config -from models.model import _gen_childnet -import torch.distributed as dist -from utils.flops_table import LatencyEst -from utils.helpers import * -from utils.EMA import ModelEma -from utils.saver import CheckpointSaver -from utils.loss import LabelSmoothingCrossEntropy -from utils.scheduler import create_scheduler -from torch.utils.tensorboard import SummaryWriter - -from nni.nas.pytorch.cream import CreamSupernetTrainer -from nni.nas.pytorch.cream import CreamSupernetTrainingMutator - -logger = logging.getLogger("nni.cream.supernet") - -def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()): - decay = [] - no_decay = [] - meta_layer_no_decay = [] - meta_layer_decay = [] - for name, param in model.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: - if 'meta_layer' in name: - meta_layer_no_decay.append(param) - else: - no_decay.append(param) - else: - if 'meta_layer' in name: - meta_layer_decay.append(param) - else: - decay.append(param) - return [ - {'params': no_decay, 'weight_decay': 0., 'lr': args.lr}, - {'params': decay, 'weight_decay': weight_decay, 'lr': args.lr}, - {'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr}, - {'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr}, - ] - -def create_optimizer_supernet(args, model, filter_bias_and_bn=True): - from torch import optim as optim - opt_lower = args.opt.lower() - 
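    # Note on the parameter groups built a few lines below: add_weight_decay_supernet()
    # (defined above) returns four groups so that biases and BatchNorm weights skip weight
    # decay, and any parameter whose name contains 'meta_layer' is trained with its own
    # learning rate (args.meta_lr) instead of args.lr. Because every group carries its own
    # 'lr', the optimizers below are built without a global learning rate, roughly:
    #   optim.SGD([{'params': no_decay,            'weight_decay': 0.,  'lr': args.lr},
    #              {'params': decay,               'weight_decay': wd,  'lr': args.lr},
    #              {'params': meta_layer_no_decay, 'weight_decay': 0.,  'lr': args.meta_lr},
    #              {'params': meta_layer_decay,    'weight_decay': 0.,  'lr': args.meta_lr}],
    #             momentum=args.momentum, nesterov=True)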
weight_decay = args.weight_decay - if 'adamw' in opt_lower or 'radam' in opt_lower: - # Compensate for the way current AdamW and RAdam optimizers apply LR to the weight-decay - # I don't believe they follow the paper or original Torch7 impl which schedules weight - # decay based on the ratio of current_lr/initial_lr - weight_decay /= args.lr - if weight_decay and filter_bias_and_bn: - parameters = add_weight_decay_supernet(model, args, weight_decay) - weight_decay = 0. - else: - parameters = model.parameters() - - if 'fused' in opt_lower: - assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' - - opt_split = opt_lower.split('_') - opt_lower = opt_split[-1] - if opt_lower == 'sgd' or opt_lower == 'nesterov': - optimizer = optim.SGD( - parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=True) - elif opt_lower == 'momentum': - optimizer = optim.SGD( - parameters, momentum=args.momentum, weight_decay=weight_decay, nesterov=False) - elif opt_lower == 'adam': - optimizer = optim.Adam( - parameters, weight_decay=weight_decay, eps=args.opt_eps) - else: - assert False and "Invalid optimizer" - raise ValueError - - return optimizer - -def main(): - parser = argparse.ArgumentParser(description='Training') - # Dataset / Model parameters - parser.add_argument('--data', metavar='DIR', - help='path to dataset') - parser.add_argument('--model', default='hypernet', type=str, metavar='MODEL', - help='Name of model to train (default: "countception"') - parser.add_argument('--pretrained', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', - help='Initialize model from this checkpoint (default: none)') - parser.add_argument('--resume', default='', type=str, metavar='PATH', - help='Resume full model and optimizer state from checkpoint (default: none)') - parser.add_argument('--num-classes', type=int, default=1000, metavar='N', - help='number of label classes (default: 1000)') - parser.add_argument('--gp', default='avg', type=str, metavar='POOL', - help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")') - parser.add_argument('--img-size', type=int, default=None, metavar='N', - help='Image patch size (default: None => model default)') - parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') - parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') - parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') - parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N', - help='input batch size for training (default: 32)') - parser.add_argument('--drop', type=float, default=0.0, metavar='DROP', - help='Dropout rate (default: 0.)') - # Optimizer parameters - parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', - help='Optimizer (default: "sgd"') - parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', - help='Optimizer Epsilon (default: 1e-8)') - parser.add_argument('--momentum', type=float, default=0.9, metavar='M', - help='SGD momentum (default: 0.9)') - parser.add_argument('--weight-decay', type=float, default=0.0001, - help='weight decay (default: 0.0001)') - # Learning rate schedule parameters - 
parser.add_argument('--sched', default='spos_linear', type=str, metavar='SCHEDULER', - help='LR scheduler (default: "step"') - parser.add_argument('--lr', type=float, default=0.01, metavar='LR', - help='learning rate (default: 0.01)') - parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', - help='warmup learning rate (default: 0.0001)') - parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', - help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') - parser.add_argument('--epochs', type=int, default=120, metavar='N', - help='number of epochs to train (default: 2)') - parser.add_argument('--start-epoch', default=None, type=int, metavar='N', - help='manual epoch number (useful on restarts)') - parser.add_argument('--decay-epochs', type=int, default=15, metavar='N', - help='epoch interval to decay LR') - parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', - help='epochs to warmup LR, if scheduler supports') - parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', - help='epochs to cooldown LR at min_lr, after cyclic schedule ends') - parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', - help='LR decay rate (default: 0.1)') - parser.add_argument('--grad', type=int, default=1, metavar='RATE', - help='LR decay rate (default: 0.1)') - # Augmentation parameters - parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', - help='Color jitter factor (default: 0.4)') - parser.add_argument('--reprob', type=float, default=0., metavar='PCT', - help='Random erase prob (default: 0.)') - parser.add_argument('--remode', type=str, default='const', - help='Random erase mode (default: "const")') - parser.add_argument('--mixup', type=float, default=0.0, - help='mixup alpha, mixup enabled if > 0. (default: 0.)') - parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', - help='turn off mixup after this epoch, disabled if 0 (default: 0)') - parser.add_argument('--smoothing', type=float, default=0.1, - help='label smoothing (default: 0.1)') - # Batch norm parameters (only works with gen_efficientnet based models currently) - parser.add_argument('--bn-tf', action='store_true', default=False, - help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') - parser.add_argument('--bn-momentum', type=float, default=None, - help='BatchNorm momentum override (if not None)') - parser.add_argument('--bn-eps', type=float, default=None, - help='BatchNorm epsilon override (if not None)') - # Model Exponential Moving Average - parser.add_argument('--model-ema', action='store_true', default=False, - help='Enable tracking moving average of model weights') - parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, - help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') - parser.add_argument('--model-ema-decay', type=float, default=0.9998, - help='decay factor for model weights moving average (default: 0.9998)') - parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', - help='learning rate noise on/off epoch percentages') - parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', - help='learning rate noise limit percent (default: 0.67)') - parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', - help='learning rate noise std-dev (default: 1.0)') - # Misc - parser.add_argument('--seed', type=int, default=42, metavar='S', - help='random seed (default: 42)') - parser.add_argument('--log-interval', type=int, default=50, metavar='N', - help='how many batches to wait before logging training status') - parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', - help='how many training processes to use (default: 1)') - parser.add_argument('--num-gpu', type=int, default=1, - help='Number of GPUS to use') - parser.add_argument("--local_rank", default=0, type=int) - parser.add_argument("--update_iter", default=1, type=int) - parser.add_argument("--slice", default=4, type=int) - parser.add_argument("--pool_size", default=10, type=int) - parser.add_argument('--resunit', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--dil_conv', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') - parser.add_argument('--tiny', action='store_true', default=False) - parser.add_argument('--flops_maximum', default=600, type=int) - parser.add_argument('--flops_minimum', default=0, type=int) - parser.add_argument('--pick_method', default='meta', type=str) - parser.add_argument('--meta_lr', default=1e-2, type=float) - parser.add_argument('--meta_sta_epoch', default=-1, type=int) - parser.add_argument('--model_selection', default=14, type=int) - parser.add_argument('--how_to_prob', default='pre_prob', type=str) - parser.add_argument('--pre_prob', default=(0.05, 0.2, 0.05, 0.5, 0.05, 0.15), type=tuple) - args = parser.parse_args() - - seed = args.seed - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - torch.backends.cudnn.deterministic = True - - args.distributed = False - if 'WORLD_SIZE' in os.environ: - args.distributed = int(os.environ['WORLD_SIZE']) > 1 - if args.distributed and args.num_gpu > 1: - logger.warning( - 'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') - args.num_gpu = 1 - - args.device = 'cuda:0' - args.world_size = 1 - args.rank = 0 # global rank - if args.distributed: - args.num_gpu = 1 - args.device = 'cuda:%d' % args.local_rank - torch.cuda.set_device(args.local_rank) - import random - port = random.randint(0, 50000) - torch.distributed.init_process_group(backend='nccl', init_method='env://') # tcp://127.0.0.1:{}'.format(port), rank=args.local_rank, world_size=8) - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - assert args.rank >= 0 - - if args.distributed: - logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' - % (args.rank, args.world_size)) - else: - logging.info('Training with a single process on %d GPUs.' 
% args.num_gpu) - - if args.model_selection == 470: - arch_list = [[0], [3, 4, 3, 1], [3, 2, 3, 0], [3, 3, 3, 1], [3, 3, 3, 3], [3, 3, 3, 3], [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', - 'ir_r1_k3_s1_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', - 'ir_r2_k3_s1_e4_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', - 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 224 - elif args.model_selection == 42: - arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 96 - elif args.model_selection == 14: - arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k3_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e4_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 64 - elif args.model_selection == 112: - arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k3_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 160 - elif args.model_selection == 285: - arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - 
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 224 - elif args.model_selection == 600: - arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], - [0]] - arch_def = [ - # stage 0, 112x112 in - ['ds_r1_k3_s1_e1_c16_se0.25'], - # stage 1, 112x112 in - ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25', - 'ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s2_e4_c24_se0.25'], - # stage 2, 56x56 in - ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], - # stage 3, 28x28 in - ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', - 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25'], - # stage 4, 14x14in - ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], - # stage 5, 14x14in - ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', - 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], - # stage 6, 7x7 in - ['cn_r1_k1_s1_c320_se0.25'], - ] - args.img_size = 224 - - model = _gen_childnet( - arch_list, - arch_def, - num_classes=args.num_classes, - drop_rate=args.drop, - global_pool=args.gp) - - data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) - if args.local_rank == 0: - logger.info(args) - - if args.local_rank == 0: - logger.info('Model %s created, param count: %d' % - (args.model, sum([m.numel() for m in model.parameters()]))) - - # data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) - - if args.num_gpu > 1: - if args.amp: - logging.warning( - 'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') - args.amp = False - model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() - else: - model.cuda() - - if args.distributed: - if has_apex: - model = DDP(model, delay_allreduce=True) - else: - if args.local_rank == 0: - logger.info("Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") - model = DDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1 - # NOTE: EMA model does not need to be wrapped by DDP - - model_ema = ModelEma( - model, - decay=args.model_ema_decay, - device='cpu' if args.model_ema_force_cpu else '', - resume=args.resume) - - if args.tiny: - from dataset.tiny_imagenet import get_newimagenet - [loader_train, loader_eval], [train_sampler, test_sampler] = get_newimagenet(args.data, args.batch_size) - else: - train_dir = os.path.join(args.data, 'train') - if not os.path.exists(train_dir): - logger.error('Training folder does not exist at: {}'.format(train_dir)) - exit(1) - - eval_dir = os.path.join(args.data, 'val') - if not os.path.isdir(eval_dir): - logger.error('Validation folder does not exist at: {}'.format(eval_dir)) - exit(1) - dataset_eval = Dataset(eval_dir) - - loader_eval = create_loader( - dataset_eval, - input_size=data_config['input_size'], - batch_size=4 * args.batch_size, - is_training=False, - interpolation=data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - distributed=args.distributed, - ) - - def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - maxk = max(topk) - batch_size = target.size(0) - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - return [correct[:k].view(-1).float().sum(0) * 100. / batch_size for k in topk] - - prec1_m = AverageMeter() - prec5_m = AverageMeter() - - def reduce_tensor(tensor, n): - rt = tensor.clone() - dist.all_reduce(rt, op=dist.ReduceOp.SUM) - rt /= n - return rt - - model_ema.ema.eval() - - with torch.no_grad(): - for step, (x, y) in enumerate(loader_eval): - logits = model_ema.ema(x) - prec1, prec5 = accuracy(logits, y, topk=(1, 5)) - - prec1 = reduce_tensor(prec1, args.world_size) - prec5 = reduce_tensor(prec5, args.world_size) - - prec1_m.update(prec1.item(), logits.size(0)) - prec5_m.update(prec5.item(), logits.size(0)) - - if args.local_rank == 0: - logger.info("Prec1: %s Prec5: %s", prec1_m.avg, prec5_m.avg) - -if __name__ == '__main__': - main() - diff --git a/examples/nas/cream/test.sh b/examples/nas/cream/test.sh deleted file mode 100755 index 627d187b92..0000000000 --- a/examples/nas/cream/test.sh +++ /dev/null @@ -1,2 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./examples/nas/cream/distributed_test.sh 8 \ - --data ./data/imagenet --model_selection 285 --resume ./data/ckpts/285.pth.tar diff --git a/examples/nas/cream/tools/_init_paths.py b/examples/nas/cream/tools/_init_paths.py new file mode 100755 index 0000000000..80ce4bccac --- /dev/null +++ b/examples/nas/cream/tools/_init_paths.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path as osp +import sys + + +def add_path(path): + if path not in sys.path: + sys.path.insert(0, path) + + +this_dir = osp.dirname(__file__) +lib_path = osp.join(this_dir, '..', 'lib') +add_path(lib_path) + +lib_path = osp.join(this_dir, '..') +add_path(lib_path) diff --git a/examples/nas/cream/tools/main.py b/examples/nas/cream/tools/main.py new file mode 100644 index 0000000000..18667e45ae --- /dev/null +++ b/examples/nas/cream/tools/main.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import shutil +import argparse +import datetime + +import _init_paths + +from lib.config import cfg + +parser = argparse.ArgumentParser(description='Cream of the Crop') +parser.add_argument('mode', type=str, default='train', + help='Mode in ["train", "retrain", "test"]') +parser.add_argument('cfg', type=str, + default='../experiments/configs/baseline.yaml', + help='configuration of creamt') +args = parser.parse_args() +cfg.merge_from_file(args.cfg) + + +def main(): + date = datetime.date.today().strftime('%m%d') + save_path = os.path.join(cfg.SAVE_PATH, "{}-{}".format(date, cfg.MODEL)) + if not os.path.exists(save_path): + os.mkdir(save_path) + + os.system( + "cp {} {}".format( + args.cfg, + os.path.join( + save_path, + 'config.yaml'))) + + if args.mode == 'train': + os.system("python -m " + "torch.distributed.launch " + "--nproc_per_node={} " + "tools/train.py " + "--cfg {}".format(cfg.NUM_GPU, args.cfg)) + elif args.mode == 'retrain': + os.system("python -m " + "torch.distributed.launch " + "--nproc_per_node={} " + "tools/retrain.py " + "--cfg {}".format(cfg.NUM_GPU, args.cfg)) + elif args.mode == 'test': + os.system("python -m " + "torch.distributed.launch " + "--nproc_per_node={} " + "tools/test.py " + "--cfg {}".format(cfg.NUM_GPU, args.cfg)) + else: + raise ValueError('Mode not supported yet!') + + +if __name__ == '__main__': + main() diff --git a/examples/nas/cream/tools/retrain.py b/examples/nas/cream/tools/retrain.py new file mode 100755 index 0000000000..78c287ba13 --- /dev/null +++ b/examples/nas/cream/tools/retrain.py @@ -0,0 +1,317 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import numpy as np +import torch.nn as nn +import _init_paths + +from torchscope import scope +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.optim import create_optimizer +from timm.models import resume_checkpoint +from timm.scheduler import create_scheduler +from timm.data import Dataset, create_loader +from timm.utils import ModelEma, update_summary +from timm.loss import LabelSmoothingCrossEntropy + +# import apex as distributed package +try: + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.core.retrain import train_epoch +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('child net training') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'retrain.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # retrain model selection + if cfg.NET.SELECTION == 470: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 42: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 285: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 600: + arch_list = [ + [0], [ + 3, 3, 2, 3, 3], [ + 3, 2, 3, 2, 3], [ + 3, 2, 3, 2, 3], [ + 3, 3, 2, 2, 3, 3], [ + 3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Retrain Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + # initialize training parameters + eval_metric = cfg.EVAL_METRICS + best_metric, best_epoch, saver = None, None, None + + # initialize distributed parameters + distributed = cfg.NUM_GPU > 1 + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', 
init_method='env://') + if args.local_rank == 0: + logger.info( + 'Training on Process {} with {} GPUs.'.format( + args.local_rank, cfg.NUM_GPU)) + + # fix random seeds + torch.manual_seed(cfg.SEED) + torch.cuda.manual_seed_all(cfg.SEED) + np.random.seed(cfg.SEED) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # get parameters and FLOPs of model + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # create optimizer + optimizer = create_optimizer(cfg, model) + model = model.cuda() + + # optionally resume from a checkpoint + resume_state, resume_epoch = {}, None + if cfg.AUTO_RESUME: + resume_state, resume_epoch = resume_checkpoint(model, cfg.RESUME_PATH) + optimizer.load_state_dict(resume_state['optimizer']) + del resume_state + + model_ema = None + if cfg.NET.EMA.USE: + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH if cfg.AUTO_RESUME else None) + + if distributed: + if cfg.BATCHNORM.SYNC_BN: + try: + if HAS_APEX: + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm( + model) + if args.local_rank == 0: + logger.info( + 'Converted model to use Synchronized BatchNorm.') + except Exception as e: + if args.local_rank == 0: + logger.error( + 'Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1 with exception {}'.format(e)) + if HAS_APEX: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info( + "Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.") + # can use device str in Torch >= 1.1 + model = DDP(model, device_ids=[args.local_rank]) + + # imagenet train dataset + train_dir = os.path.join(cfg.DATA_DIR, 'train') + if not os.path.exists(train_dir) and args.local_rank == 0: + logger.error('Training folder does not exist at: {}'.format(train_dir)) + exit(1) + dataset_train = Dataset(train_dir) + loader_train = create_loader( + dataset_train, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.BATCH_SIZE, + is_training=True, + color_jitter=cfg.AUGMENTATION.COLOR_JITTER, + auto_augment=cfg.AUGMENTATION.AA, + num_aug_splits=0, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + collate_fn=None, + pin_memory=cfg.DATASET.PIN_MEM, + interpolation='random', + re_mode=cfg.AUGMENTATION.RE_MODE, + re_prob=cfg.AUGMENTATION.RE_PROB + ) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + interpolation=cfg.DATASET.INTERPOLATION, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + pin_memory=cfg.DATASET.PIN_MEM + ) + + # whether to use label smoothing + if cfg.AUGMENTATION.SMOOTHING > 0.: + train_loss_fn = LabelSmoothingCrossEntropy( + 
smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + else: + train_loss_fn = nn.CrossEntropyLoss().cuda() + validate_loss_fn = train_loss_fn + + # create learning rate scheduler + lr_scheduler, num_epochs = create_scheduler(cfg, optimizer) + start_epoch = resume_epoch if resume_epoch is not None else 0 + if start_epoch > 0: + lr_scheduler.step(start_epoch) + if args.local_rank == 0: + logger.info('Scheduled epochs: {}'.format(num_epochs)) + + try: + best_record, best_ep = 0, 0 + for epoch in range(start_epoch, num_epochs): + if distributed: + loader_train.sampler.set_epoch(epoch) + + train_metrics = train_epoch( + epoch, + model, + loader_train, + optimizer, + train_loss_fn, + cfg, + lr_scheduler=lr_scheduler, + saver=saver, + output_dir=output_dir, + model_ema=model_ema, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + eval_metrics = validate( + epoch, + model, + loader_eval, + validate_loss_fn, + cfg, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + if model_ema is not None and not cfg.NET.EMA.FORCE_CPU: + ema_eval_metrics = validate( + epoch, + model_ema.ema, + loader_eval, + validate_loss_fn, + cfg, + log_suffix='_EMA', + logger=logger, + writer=writer) + eval_metrics = ema_eval_metrics + + if lr_scheduler is not None: + lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) + + update_summary(epoch, train_metrics, eval_metrics, os.path.join( + output_dir, 'summary.csv'), write_header=best_metric is None) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint( + model, optimizer, cfg, + epoch=epoch, model_ema=model_ema, metric=save_metric) + + if best_record < eval_metrics[eval_metric]: + best_record = eval_metrics[eval_metric] + best_ep = epoch + + if args.local_rank == 0: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_record, best_ep)) + + except KeyboardInterrupt: + pass + + if best_metric is not None: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/cream/tools/test.py b/examples/nas/cream/tools/test.py new file mode 100755 index 0000000000..f09c71f3ef --- /dev/null +++ b/examples/nas/cream/tools/test.py @@ -0,0 +1,157 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import torch.nn as nn +import _init_paths + +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.utils import ModelEma +from timm.models import resume_checkpoint +from timm.data import Dataset, create_loader + +# import apex as distributed package +try: + from apex.parallel import convert_syncbn_model + from apex.parallel import DistributedDataParallel as DDP + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('child net testing') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'test.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # test model selection + if cfg.NET.SELECTION == 470: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 42: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 285: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 600: + arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], + [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Test Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # initialize distributed parameters + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + "Testing on Process {} with {} GPUs.".format( + args.local_rank, cfg.NUM_GPU)) + + # resume model from checkpoint + assert cfg.AUTO_RESUME
is True and os.path.exists(cfg.RESUME_PATH) + _, __ = resume_checkpoint(model, cfg.RESUME_PATH) + + model = model.cuda() + + model_ema = None + if cfg.NET.EMA.USE: + # Important to create EMA model after cuda(), DP wrapper, and AMP but + # before SyncBN and DDP wrapper + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + num_workers=cfg.WORKERS, + distributed=True, + pin_memory=cfg.DATASET.PIN_MEM, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD + ) + + # only test accuracy of model-EMA + validate_loss_fn = nn.CrossEntropyLoss().cuda() + validate(0, model_ema.ema, loader_eval, validate_loss_fn, cfg, + log_suffix='_EMA', logger=logger, + writer=writer, local_rank=args.local_rank) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/cream/tools/train.py b/examples/nas/cream/tools/train.py new file mode 100644 index 0000000000..c82306e741 --- /dev/null +++ b/examples/nas/cream/tools/train.py @@ -0,0 +1,250 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import sys +import datetime +import torch +import numpy as np +import torch.nn as nn + +import _init_paths + +# import timm packages +from timm.utils import CheckpointSaver, update_summary +from timm.loss import LabelSmoothingCrossEntropy +from timm.data import Dataset, create_loader +from timm.models import resume_checkpoint + +# import apex as distributed package +try: + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + USE_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + USE_APEX = False + +# import models and training functions +from lib.utils.flops_table import FlopsEst +from lib.core.train import train_epoch, validate +from lib.models.structures.supernet import gen_supernet +from lib.models.PrioritizedBoard import PrioritizedBoard +from lib.models.MetaMatchingNetwork import MetaMatchingNetwork +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from lib.utils.util import parse_config_args, get_logger, \ + create_optimizer_supernet, create_supernet_scheduler + + +def main(): + args, cfg = parse_config_args('super net training') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, "train.log")) + else: + logger = None + + # initialize distributed parameters + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + 'Training on Process %d with %d GPUs.', + args.local_rank, cfg.NUM_GPU) + + # fix random seeds + torch.manual_seed(cfg.SEED) + torch.cuda.manual_seed_all(cfg.SEED) + np.random.seed(cfg.SEED) + 
torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # generate supernet + model, sta_num, resolution = gen_supernet( + flops_minimum=cfg.SUPERNET.FLOPS_MINIMUM, + flops_maximum=cfg.SUPERNET.FLOPS_MAXIMUM, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP, + resunit=cfg.SUPERNET.RESUNIT, + dil_conv=cfg.SUPERNET.DIL_CONV, + slice=cfg.SUPERNET.SLICE, + verbose=cfg.VERBOSE, + logger=logger) + + # initialize meta matching networks + MetaMN = MetaMatchingNetwork(cfg) + + # number of choice blocks in supernet + choice_num = len(model.blocks[1][0]) + if args.local_rank == 0: + logger.info('Supernet created, param count: %d', ( + sum([m.numel() for m in model.parameters()]))) + logger.info('resolution: %d', (resolution)) + logger.info('choice number: %d', (choice_num)) + + # initialize prioritized board + prioritized_board = PrioritizedBoard( + cfg, CHOICE_NUM=choice_num, sta_num=sta_num) + + # initialize flops look-up table + model_est = FlopsEst(model) + + # optionally resume from a checkpoint + optimizer_state = None + resume_epoch = None + if cfg.AUTO_RESUME: + optimizer_state, resume_epoch = resume_checkpoint( + model, cfg.RESUME_PATH) + + # create optimizer and resume from checkpoint + optimizer = create_optimizer_supernet(cfg, model, USE_APEX) + if optimizer_state is not None: + optimizer.load_state_dict(optimizer_state['optimizer']) + model = model.cuda() + + # convert model to distributed mode + if cfg.BATCHNORM.SYNC_BN: + try: + if USE_APEX: + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if args.local_rank == 0: + logger.info('Converted model to use Synchronized BatchNorm.') + except Exception as exception: + logger.info( + 'Failed to enable Synchronized BatchNorm. ' + 'Install Apex or Torch >= 1.1 with Exception %s', exception) + if USE_APEX: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info( + "Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") + # can use device str in Torch >= 1.1 + model = DDP(model, device_ids=[args.local_rank]) + + # create learning rate scheduler + lr_scheduler, num_epochs = create_supernet_scheduler(cfg, optimizer) + + start_epoch = resume_epoch if resume_epoch is not None else 0 + if start_epoch > 0: + lr_scheduler.step(start_epoch) + + if args.local_rank == 0: + logger.info('Scheduled epochs: %d', num_epochs) + + # imagenet train dataset + train_dir = os.path.join(cfg.DATA_DIR, 'train') + if not os.path.exists(train_dir): + logger.info('Training folder does not exist at: %s', train_dir) + sys.exit() + + dataset_train = Dataset(train_dir) + loader_train = create_loader( + dataset_train, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.BATCH_SIZE, + is_training=True, + use_prefetcher=True, + re_prob=cfg.AUGMENTATION.RE_PROB, + re_mode=cfg.AUGMENTATION.RE_MODE, + color_jitter=cfg.AUGMENTATION.COLOR_JITTER, + interpolation='random', + num_workers=cfg.WORKERS, + distributed=True, + collate_fn=None, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD + ) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.isdir(eval_dir): + logger.info('Validation folder does not exist at: %s', eval_dir) + sys.exit() + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=4 * cfg.DATASET.BATCH_SIZE, + is_training=False, + use_prefetcher=True, + num_workers=cfg.WORKERS, + distributed=True, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + interpolation=cfg.DATASET.INTERPOLATION + ) + + # whether to use label smoothing + if cfg.AUGMENTATION.SMOOTHING > 0.: + train_loss_fn = LabelSmoothingCrossEntropy( + smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + else: + train_loss_fn = nn.CrossEntropyLoss().cuda() + validate_loss_fn = train_loss_fn + + # initialize training parameters + eval_metric = cfg.EVAL_METRICS + best_metric, best_epoch, saver, best_children_pool = None, None, None, [] + if args.local_rank == 0: + decreasing = True if eval_metric == 'loss' else False + saver = CheckpointSaver( + checkpoint_dir=output_dir, + decreasing=decreasing) + + # training scheme + try: + for epoch in range(start_epoch, num_epochs): + loader_train.sampler.set_epoch(epoch) + + # train one epoch + train_metrics = train_epoch( + epoch, + model, + loader_train, + optimizer, + train_loss_fn, + prioritized_board, + MetaMN, + cfg, + lr_scheduler=lr_scheduler, + saver=saver, + output_dir=output_dir, + logger=logger, + est=model_est, + local_rank=args.local_rank) + + # evaluate one epoch + eval_metrics = validate(model, loader_eval, validate_loss_fn, + prioritized_board, MetaMN, cfg, + local_rank=args.local_rank, logger=logger) + + update_summary(epoch, train_metrics, eval_metrics, os.path.join( + output_dir, 'summary.csv'), write_header=best_metric is None) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint( + model, optimizer, cfg, + epoch=epoch, metric=save_metric) + + except KeyboardInterrupt: + pass + + +if __name__ == '__main__': + main() diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py b/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py deleted file mode 100755 index 
37f154a1e9..0000000000 --- a/src/sdk/pynni/nni/nas/pytorch/cream/mutator.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -import logging - -from nni.nas.pytorch.random import RandomMutator - -_logger = logging.getLogger(__name__) - - -class CreamSupernetTrainingMutator(RandomMutator): - """ - A random mutator with flops limit. - - Parameters - ---------- - model : nn.Module - PyTorch model. - flops_func : callable - Callable that takes a candidate from ``sample_search`` and returns its candidate. When ``flops_func`` - is None, functions related to flops will be deactivated. - flops_lb : number - Lower bound of flops. - flops_ub : number - Upper bound of flops. - flops_bin_num : number - Number of bins divided for the interval of flops to ensure the uniformity. Bigger number will be more - uniform, but the sampling will be slower. - flops_sample_timeout : int - Maximum number of attempts to sample before giving up and use a random candidate. - """ - - def __init__(self, model, how_to_prob='even', pre_prob=(0.05, 0.05, 0.2, 0.4, 0.2, 0.1), CHOICE_NUM=6, - sta_num=(4, 4, 4, 4, 4)): - - super().__init__(model) - self.how_to_prob = how_to_prob - self.pre_prob = pre_prob - self.CHOICE_NUM = CHOICE_NUM - self.sta_num = sta_num - - def get_prob(self): - if self.how_to_prob == 'even': - return None - elif self.how_to_prob == 'pre_prob': - return self.pre_prob - else: - raise ValueError("prob method not supported") - - def sample_search(self): - """ - Sample a candidate for training. When ``flops_func`` is not None, candidates will be sampled uniformly - relative to flops. - - Returns - ------- - dict - """ - - return super(CreamSupernetTrainingMutator, self).sample_search() - - def sample_final(self): - """ - Implement only to suffice the interface of Mutator. 
- """ - return self.sample_search() From 4d72a7084c831a028746e98a092814c59c66d2ec Mon Sep 17 00:00:00 2001 From: mapleam Date: Sat, 21 Nov 2020 14:02:37 +0800 Subject: [PATCH 39/62] version 2.0 --- examples/nas/cream/lib/core/retrain.py | 136 ----- examples/nas/cream/lib/core/test.py | 98 ---- examples/nas/cream/lib/core/train.py | 230 -------- .../cream/lib/models/MetaMatchingNetwork.py | 167 ------ .../nas/cream/lib/models/PrioritizedBoard.py | 137 ----- examples/nas/cream/run.sh | 1 + examples/nas/cream/tools/_init_paths.py | 24 - examples/nas/cream/tools/main.py | 61 --- examples/nas/cream/tools/retrain.py | 317 ----------- examples/nas/cream/tools/test.py | 157 ------ examples/nas/cream/{tools => }/train.py | 82 +-- examples/nas/cream/train.yaml | 53 ++ .../pynni/nni/nas/pytorch/cream/trainer.py | 517 ++++++++++-------- src/sdk/pynni/nni/nas/pytorch/cream/utils.py | 34 ++ 14 files changed, 413 insertions(+), 1601 deletions(-) delete mode 100755 examples/nas/cream/lib/core/retrain.py delete mode 100755 examples/nas/cream/lib/core/test.py delete mode 100644 examples/nas/cream/lib/core/train.py delete mode 100644 examples/nas/cream/lib/models/MetaMatchingNetwork.py delete mode 100644 examples/nas/cream/lib/models/PrioritizedBoard.py create mode 100644 examples/nas/cream/run.sh delete mode 100755 examples/nas/cream/tools/_init_paths.py delete mode 100644 examples/nas/cream/tools/main.py delete mode 100755 examples/nas/cream/tools/retrain.py delete mode 100755 examples/nas/cream/tools/test.py rename examples/nas/cream/{tools => }/train.py (76%) create mode 100644 examples/nas/cream/train.yaml create mode 100644 src/sdk/pynni/nni/nas/pytorch/cream/utils.py diff --git a/examples/nas/cream/lib/core/retrain.py b/examples/nas/cream/lib/core/retrain.py deleted file mode 100755 index ca234848ab..0000000000 --- a/examples/nas/cream/lib/core/retrain.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import time -import torch -import torchvision - -from collections import OrderedDict - -from lib.utils.util import AverageMeter, accuracy, reduce_tensor - - -def train_epoch( - epoch, model, loader, optimizer, loss_fn, cfg, - lr_scheduler=None, saver=None, output_dir='', use_amp=False, - model_ema=None, logger=None, writer=None, local_rank=0): - batch_time_m = AverageMeter() - data_time_m = AverageMeter() - losses_m = AverageMeter() - prec1_m = AverageMeter() - prec5_m = AverageMeter() - - model.train() - - end = time.time() - last_idx = len(loader) - 1 - num_updates = epoch * len(loader) - optimizer.zero_grad() - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - data_time_m.update(time.time() - end) - - input = input.cuda() - target = target.cuda() - output = model(input) - - loss = loss_fn(output, target) - - prec1, prec5 = accuracy(output, target, topk=(1, 5)) - - if cfg.NUM_GPU > 1: - reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) - prec1 = reduce_tensor(prec1, cfg.NUM_GPU) - prec5 = reduce_tensor(prec5, cfg.NUM_GPU) - else: - reduced_loss = loss.data - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - torch.cuda.synchronize() - - losses_m.update(reduced_loss.item(), input.size(0)) - prec1_m.update(prec1.item(), output.size(0)) - prec5_m.update(prec5.item(), output.size(0)) - - if model_ema is not None: - model_ema.update(model) - num_updates += 1 - - batch_time_m.update(time.time() - end) - if last_batch or batch_idx % cfg.LOG_INTERVAL == 0: - lrl = [param_group['lr'] for param_group in optimizer.param_groups] - lr = sum(lrl) / len(lrl) - - 
if local_rank == 0: - logger.info( - 'Train: {} [{:>4d}/{}] ' - 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' - 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' - 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) ' - 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' - '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' - 'LR: {lr:.3e}' - 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( - epoch, - batch_idx, - len(loader), - loss=losses_m, - top1=prec1_m, - top5=prec5_m, - batch_time=batch_time_m, - rate=input.size(0) * - cfg.NUM_GPU / - batch_time_m.val, - rate_avg=input.size(0) * - cfg.NUM_GPU / - batch_time_m.avg, - lr=lr, - data_time=data_time_m)) - - writer.add_scalar( - 'Loss/train', - prec1_m.avg, - epoch * - len(loader) + - batch_idx) - writer.add_scalar( - 'Accuracy/train', - prec1_m.avg, - epoch * - len(loader) + - batch_idx) - writer.add_scalar( - 'Learning_Rate', - optimizer.param_groups[0]['lr'], - epoch * len(loader) + batch_idx) - - if cfg.SAVE_IMAGES and output_dir: - torchvision.utils.save_image( - input, os.path.join( - output_dir, 'train-batch-%d.jpg' % - batch_idx), padding=0, normalize=True) - - if saver is not None and cfg.RECOVERY_INTERVAL and ( - last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): - saver.save_recovery( - model, - optimizer, - cfg, - epoch, - model_ema=model_ema, - use_amp=use_amp, - batch_idx=batch_idx) - - if lr_scheduler is not None: - lr_scheduler.step_update( - num_updates=num_updates, - metric=losses_m.avg) - - end = time.time() - # end for - - if hasattr(optimizer, 'sync_lookahead'): - optimizer.sync_lookahead() - - return OrderedDict([('loss', losses_m.avg)]) diff --git a/examples/nas/cream/lib/core/test.py b/examples/nas/cream/lib/core/test.py deleted file mode 100755 index 69d4dabbf0..0000000000 --- a/examples/nas/cream/lib/core/test.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import time -import torch - -from collections import OrderedDict -from lib.utils.util import AverageMeter, accuracy, reduce_tensor - - -def validate( - epoch, - model, - loader, - loss_fn, - cfg, - log_suffix='', - logger=None, - writer=None, - local_rank=0): - batch_time_m = AverageMeter() - losses_m = AverageMeter() - prec1_m = AverageMeter() - prec5_m = AverageMeter() - - model.eval() - - end = time.time() - last_idx = len(loader) - 1 - with torch.no_grad(): - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - - output = model(input) - if isinstance(output, (tuple, list)): - output = output[0] - - # augmentation reduction - reduce_factor = cfg.TTA - if reduce_factor > 1: - output = output.unfold( - 0, - reduce_factor, - reduce_factor).mean( - dim=2) - target = target[0:target.size(0):reduce_factor] - - loss = loss_fn(output, target) - prec1, prec5 = accuracy(output, target, topk=(1, 5)) - - if cfg.NUM_GPU > 1: - reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) - prec1 = reduce_tensor(prec1, cfg.NUM_GPU) - prec5 = reduce_tensor(prec5, cfg.NUM_GPU) - else: - reduced_loss = loss.data - - torch.cuda.synchronize() - - losses_m.update(reduced_loss.item(), input.size(0)) - prec1_m.update(prec1.item(), output.size(0)) - prec5_m.update(prec5.item(), output.size(0)) - - batch_time_m.update(time.time() - end) - end = time.time() - if local_rank == 0 and ( - last_batch or batch_idx % - cfg.LOG_INTERVAL == 0): - log_name = 'Test' + log_suffix - logger.info( - '{0}: [{1:>4d}/{2}] ' - 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' - 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' - 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' - 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( - log_name, batch_idx, last_idx, - batch_time=batch_time_m, loss=losses_m, - top1=prec1_m, top5=prec5_m)) - - writer.add_scalar( - 'Loss' + log_suffix + '/vaild', - prec1_m.avg, - epoch * len(loader) + batch_idx) - writer.add_scalar( - 'Accuracy' + - log_suffix + - '/vaild', - prec1_m.avg, - epoch * - len(loader) + - batch_idx) - - metrics = OrderedDict( - [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) - - return metrics diff --git a/examples/nas/cream/lib/core/train.py b/examples/nas/cream/lib/core/train.py deleted file mode 100644 index b18a7fe8db..0000000000 --- a/examples/nas/cream/lib/core/train.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import time -import torchvision -import torch.nn.functional as F - -from lib.utils.util import * - - -def train_epoch( - epoch, - model, - loader, - optimizer, - loss_fn, - prioritized_board, - MetaMN, - cfg, - est=None, - logger=None, - lr_scheduler=None, - saver=None, - output_dir='', - model_ema=None, - local_rank=0): - batch_time_m = AverageMeter() - data_time_m = AverageMeter() - losses_m = AverageMeter() - kd_losses_m = AverageMeter() - prec1_m = AverageMeter() - prec5_m = AverageMeter() - - model.train() - - end = time.time() - last_idx = len(loader) - 1 - - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - data_time_m.update(time.time() - end) - - # get random architectures - prob = prioritized_board.get_prob() - random_cand = prioritized_board.get_cand_with_prob(prob) - random_cand.insert(0, [0]) - random_cand.append([0]) - - # evaluate FLOPs of candidates - # cand_flops = est.get_flops(random_cand) - - # update meta matching networks - # MetaMN.run_update(input, target, random_cand, model, optimizer, - # prioritized_board, loss_fn, epoch, batch_idx) - - # get_best_teacher - # meta_value, teacher_cand = prioritized_board.select_teacher(model, random_cand) - - if prioritized_board.board_size() == 0 or epoch <= cfg.SUPERNET.META_STA_EPOCH: - output = model(input, random_cand) - loss = loss_fn(output, target) - kd_loss, teacher_output, teacher_cand = None, None, None - else: - output = model(input, random_cand) - valid_loss = loss_fn(output, target) - - # get soft label from teacher cand - with torch.no_grad(): - teacher_output = model(input, teacher_cand).detach() - soft_label = F.softmax(teacher_output, dim=1) - kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) - - loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - prec1, prec5 = accuracy(output, target, topk=(1, 5)) - if cfg.NUM_GPU == 1: - reduced_loss = loss.data - else: - reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) - prec1 = reduce_tensor(prec1, cfg.NUM_GPU) - prec5 = reduce_tensor(prec5, cfg.NUM_GPU) - - # prioritized_board.update_prioritized_board(input, teacher_output, output, epoch, prec1, cand_flops, teacher_cand) - - torch.cuda.synchronize() - - if kd_loss is not None: - kd_losses_m.update(kd_loss.item(), input.size(0)) - losses_m.update(reduced_loss.item(), input.size(0)) - prec1_m.update(prec1.item(), output.size(0)) - prec5_m.update(prec5.item(), output.size(0)) - batch_time_m.update(time.time() - end) - - if lr_scheduler is not None: - lr_scheduler.step() - - if last_batch or batch_idx % cfg.LOG_INTERVAL == 0: - lrl = [param_group['lr'] for param_group in optimizer.param_groups] - lr = sum(lrl) / len(lrl) - - if local_rank == 0: - logger.info( - 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' - 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' - 'KD-Loss: {kd_loss.val:>9.6f} ({kd_loss.avg:>6.4f}) ' - 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' - 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) ' - 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' - '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' - 'LR: {lr:.3e} ' - 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( - epoch, - batch_idx, len(loader), - 100. 
* batch_idx / last_idx, - loss=losses_m, - kd_loss=kd_losses_m, - top1=prec1_m, - top5=prec5_m, - batch_time=batch_time_m, - rate=input.size(0) * cfg.NUM_GPU / batch_time_m.val, - rate_avg=input.size(0) * cfg.NUM_GPU / batch_time_m.avg, - lr=lr, - data_time=data_time_m)) - - if cfg.SAVE_IMAGES and output_dir: - torchvision.utils.save_image( - input, os.path.join( - output_dir, 'train-batch-%d.jpg' % - batch_idx), padding=0, normalize=True) - - if saver is not None and cfg.RECOVERY_INTERVAL and ( - last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): - saver.save_recovery(model, optimizer, cfg, epoch, - model_ema=model_ema, batch_idx=batch_idx) - - end = time.time() - - if local_rank == 0: - for idx, i in enumerate(prioritized_board.prioritized_board): - logger.info("No.{} {}".format(idx, i[:4])) - - return OrderedDict([('loss', losses_m.avg)]) - - -def validate( - model, - loader, - loss_fn, - prioritized_board, - cfg, - log_suffix='', - local_rank=0, - logger=None): - batch_time_m = AverageMeter() - losses_m = AverageMeter() - prec1_m = AverageMeter() - prec5_m = AverageMeter() - - model.eval() - - end = time.time() - last_idx = len(loader) - 1 - - # get random child architecture - random_cand = prioritized_board.get_cand_with_prob(None) - random_cand.insert(0, [0]) - random_cand.append([0]) - - with torch.no_grad(): - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - input = input.cuda() - target = target.cuda() - - output = model(input, random_cand) - if isinstance(output, (tuple, list)): - output = output[0] - - # augmentation reduction - reduce_factor = cfg.TTA - if reduce_factor > 1: - output = output.unfold( - 0, - reduce_factor, - reduce_factor).mean( - dim=2) - target = target[0:target.size(0):reduce_factor] - - loss = loss_fn(output, target) - prec1, prec5 = accuracy(output, target, topk=(1, 5)) - - if cfg.NUM_GPU > 1: - reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) - prec1 = reduce_tensor(prec1, cfg.NUM_GPU) - prec5 = reduce_tensor(prec5, cfg.NUM_GPU) - else: - reduced_loss = loss.data - - torch.cuda.synchronize() - - losses_m.update(reduced_loss.item(), input.size(0)) - prec1_m.update(prec1.item(), output.size(0)) - prec5_m.update(prec5.item(), output.size(0)) - - batch_time_m.update(time.time() - end) - end = time.time() - if local_rank == 0 and ( - last_batch or batch_idx % - cfg.LOG_INTERVAL == 0): - log_name = 'Test' + log_suffix - logger.info( - '{0}: [{1:>4d}/{2}] ' - 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' - 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' - 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' - 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( - log_name, batch_idx, last_idx, - batch_time=batch_time_m, loss=losses_m, - top1=prec1_m, top5=prec5_m)) - - metrics = OrderedDict( - [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) - - return metrics diff --git a/examples/nas/cream/lib/models/MetaMatchingNetwork.py b/examples/nas/cream/lib/models/MetaMatchingNetwork.py deleted file mode 100644 index fc6dcb60e5..0000000000 --- a/examples/nas/cream/lib/models/MetaMatchingNetwork.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import torch -import torch.nn.functional as F - -from copy import deepcopy - -from lib.utils.util import cross_entropy_loss_with_soft_target - - -class MetaMatchingNetwork(): - def __init__(self, cfg): - self.cfg = cfg - - # only update student network weights - def update_student_weights_only( - self, - random_cand, - grad_1, - optimizer, - model): - for weight, grad_item in zip( - model.module.rand_parameters(random_cand), grad_1): - weight.grad = grad_item - torch.nn.utils.clip_grad_norm_( - model.module.rand_parameters(random_cand), 1) - optimizer.step() - for weight, grad_item in zip( - model.module.rand_parameters(random_cand), grad_1): - del weight.grad - - # only update meta networks weights - def update_meta_weights_only( - self, - random_cand, - teacher_cand, - model, - optimizer, - grad_teacher): - for weight, grad_item in zip(model.module.rand_parameters( - teacher_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), grad_teacher): - weight.grad = grad_item - - # clip gradients - torch.nn.utils.clip_grad_norm_( - model.module.rand_parameters( - random_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), 1) - - optimizer.step() - for weight, grad_item in zip(model.module.rand_parameters( - teacher_cand, self.cfg.SUPERNET.PICK_METHOD == 'meta'), grad_teacher): - del weight.grad - - # simulate sgd updating - def simulate_sgd_update(self, w, g, optimizer): - return g * optimizer.param_groups[-1]['lr'] + w - - # split training images into several slices - def get_minibatch_input(self, input): - slice = self.cfg.SUPERNET.SLICE - x = deepcopy(input[:slice].clone().detach()) - return x - - def calculate_1st_gradient(self, kd_loss, model, random_cand, optimizer): - optimizer.zero_grad() - grad = torch.autograd.grad( - kd_loss, - model.module.rand_parameters(random_cand), - create_graph=True) - return grad - - def calculate_2nd_gradient( - self, - validation_loss, - model, - optimizer, - random_cand, - teacher_cand, - students_weight): - optimizer.zero_grad() - grad_student_val = torch.autograd.grad( - validation_loss, - model.module.rand_parameters(random_cand), - retain_graph=True) - - grad_teacher = torch.autograd.grad( - students_weight[0], - model.module.rand_parameters( - teacher_cand, - self.cfg.SUPERNET.PICK_METHOD == 'meta'), - grad_outputs=grad_student_val) - return grad_teacher - - # forward training data - def forward_training( - self, - x, - model, - random_cand, - teacher_cand, - meta_value): - output = model(x, random_cand) - with torch.no_grad(): - teacher_output = model(x, teacher_cand) - soft_label = F.softmax(teacher_output, dim=1) - kd_loss = meta_value * \ - cross_entropy_loss_with_soft_target(output, soft_label) - return kd_loss - - # forward validation data - def forward_validation(self, input, target, random_cand, model, loss_fn): - slice = self.cfg.SUPERNET.SLICE - x = input[slice:slice * 2].clone() - output_2 = model(x, random_cand) - validation_loss = loss_fn(output_2, target[slice:slice * 2]) - return validation_loss - - def isUpdate(self, current_epoch, batch_idx, prioritized_board): - isUpdate = True - isUpdate &= (current_epoch > self.cfg.SUPERNET.META_STA_EPOCH) - isUpdate &= (batch_idx > 0) - isUpdate &= (batch_idx % self.cfg.SUPERNET.UPDATE_ITER == 0) - isUpdate &= (prioritized_board.board_size() > 0) - return isUpdate - - # update meta matching networks - def run_update(self, input, target, random_cand, model, optimizer, - prioritized_board, loss_fn, 
current_epoch, batch_idx): - if self.isUpdate(current_epoch, batch_idx, prioritized_board): - x = self.get_minibatch_input(input) - - meta_value, teacher_cand = prioritized_board.select_teacher( - model, random_cand) - - kd_loss = self.forward_training( - x, model, random_cand, teacher_cand, meta_value) - - # calculate 1st gradient - grad_1st = self.calculate_1st_gradient( - kd_loss, model, random_cand, optimizer) - - # simulate updated student weights - students_weight = [ - self.simulate_sgd_update( - p, grad_item, optimizer) for p, grad_item in zip( - model.module.rand_parameters(random_cand), grad_1st)] - - # update student weights - self.update_student_weights_only( - random_cand, grad_1st, optimizer, model) - - validation_loss = self.forward_validation( - input, target, random_cand, model, loss_fn) - - # calculate 2nd gradient - grad_teacher = self.calculate_2nd_gradient( - validation_loss, model, optimizer, random_cand, teacher_cand, students_weight) - - # update meta matching networks - self.update_meta_weights_only( - random_cand, teacher_cand, model, optimizer, grad_teacher) - - # delete internal variants - del grad_teacher, grad_1st, x, validation_loss, kd_loss, students_weight diff --git a/examples/nas/cream/lib/models/PrioritizedBoard.py b/examples/nas/cream/lib/models/PrioritizedBoard.py deleted file mode 100644 index e38bedc903..0000000000 --- a/examples/nas/cream/lib/models/PrioritizedBoard.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import numpy as np -import torch.nn.functional as F - -from copy import deepcopy - - -class PrioritizedBoard(): - def __init__(self, cfg, CHOICE_NUM=6, sta_num=(4, 4, 4, 4, 4), acc_gap=5): - self.cfg = cfg - self.prioritized_board = [] - self.choice_num = CHOICE_NUM - self.sta_num = sta_num - self.acc_gap = acc_gap - - # select teacher from prioritized board - - def select_teacher(self, model, random_cand): - if self.cfg.SUPERNET.PICK_METHOD == 'top1': - meta_value, teacher_cand = 0.5, sorted( - self.prioritized_board, reverse=True)[0][3] - elif self.cfg.SUPERNET.PICK_METHOD == 'meta': - meta_value, cand_idx, teacher_cand = -1000000000, -1, None - for now_idx, item in enumerate(self.prioritized_board): - inputx = item[4] - output = F.softmax(model(inputx, random_cand), dim=1) - weight = model.module.forward_meta(output - item[5]) - if weight > meta_value: - meta_value = weight - cand_idx = now_idx - teacher_cand = self.prioritized_board[cand_idx][3] - assert teacher_cand is not None - meta_value = F.sigmoid(-weight) - else: - raise ValueError('Method Not supported') - - return meta_value, teacher_cand - - def board_size(self): - return len(self.prioritized_board) - - # get prob from config file - - def get_prob(self): - if self.cfg.SUPERNET.HOW_TO_PROB == 'even' or ( - self.cfg.SUPERNET.HOW_TO_PROB == 'teacher' and len( - self.prioritized_board) == 0): - return None - elif self.cfg.SUPERNET.HOW_TO_PROB == 'pre_prob': - return self.cfg.SUPERNET.PRE_PROB - elif self.cfg.SUPERNET.HOW_TO_PROB == 'teacher': - op_dict = {} - for i in range(self.choice_num): - op_dict[i] = 0 - for item in self.prioritized_board: - cand = item[3] - for block in cand: - for op in block: - op_dict[op] += 1 - sum_op = 0 - for i in range(self.choice_num): - sum_op = sum_op + op_dict[i] - prob = [] - for i in range(self.choice_num): - prob.append(float(op_dict[i]) / sum_op) - del op_dict, sum_op - return 
prob - - # sample random architecture - - def get_cand_with_prob(self, prob=None): - if prob is None: - get_random_cand = [ - np.random.choice( - self.choice_num, - item).tolist() for item in self.sta_num] - else: - get_random_cand = [ - np.random.choice( - self.choice_num, - item, - prob).tolist() for item in self.sta_num] - - return get_random_cand - - def isUpdate(self, current_epoch, prec1, flops): - if current_epoch <= self.cfg.SUPERNET.META_STA_EPOCH: - return False - - if len(self.prioritized_board) < self.cfg.SUPERNET.POOL_SIZE: - return True - - if prec1 > self.prioritized_board[-1][1] + self.acc_gap: - return True - - if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]: - return True - - return False - - def update_prioritized_board( - self, - inputs, - teacher_output, - outputs, - current_epoch, - prec1, - flops, - cand): - if self.isUpdate(current_epoch, prec1, flops): - val_prec1 = prec1 - training_data = deepcopy(inputs[:self.cfg.SUPERNET.SLICE].detach()) - if len(self.prioritized_board) == 0: - features = deepcopy(outputs[:self.cfg.SUPERNET.SLICE].detach()) - else: - features = deepcopy( - teacher_output[:self.cfg.SUPERNET.SLICE].detach()) - self.prioritized_board.append( - (val_prec1, - prec1, - flops, - cand, - training_data, - F.softmax( - features, - dim=1))) - self.prioritized_board = sorted( - self.prioritized_board, reverse=True) - - if len(self.prioritized_board) > self.cfg.SUPERNET.POOL_SIZE: - self.prioritized_board = sorted( - self.prioritized_board, reverse=True) - del self.prioritized_board[-1] diff --git a/examples/nas/cream/run.sh b/examples/nas/cream/run.sh new file mode 100644 index 0000000000..3bf9375ac3 --- /dev/null +++ b/examples/nas/cream/run.sh @@ -0,0 +1 @@ +python -m torch.distributed.launch --nproc_per_node=8 tools/retrain.py --cfg ./train.yaml \ No newline at end of file diff --git a/examples/nas/cream/tools/_init_paths.py b/examples/nas/cream/tools/_init_paths.py deleted file mode 100755 index 80ce4bccac..0000000000 --- a/examples/nas/cream/tools/_init_paths.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os.path as osp -import sys - - -def add_path(path): - if path not in sys.path: - sys.path.insert(0, path) - - -this_dir = osp.dirname(__file__) -lib_path = osp.join(this_dir, '..', 'lib') -add_path(lib_path) - -lib_path = osp.join(this_dir, '..') -add_path(lib_path) diff --git a/examples/nas/cream/tools/main.py b/examples/nas/cream/tools/main.py deleted file mode 100644 index 18667e45ae..0000000000 --- a/examples/nas/cream/tools/main.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import os -import shutil -import argparse -import datetime - -import _init_paths - -from lib.config import cfg - -parser = argparse.ArgumentParser(description='Cream of the Crop') -parser.add_argument('mode', type=str, default='train', - help='Mode in ["train", "retrain", "test"]') -parser.add_argument('cfg', type=str, - default='../experiments/configs/baseline.yaml', - help='configuration of creamt') -args = parser.parse_args() -cfg.merge_from_file(args.cfg) - - -def main(): - date = datetime.date.today().strftime('%m%d') - save_path = os.path.join(cfg.SAVE_PATH, "{}-{}".format(date, cfg.MODEL)) - if not os.path.exists(save_path): - os.mkdir(save_path) - - os.system( - "cp {} {}".format( - args.cfg, - os.path.join( - save_path, - 'config.yaml'))) - - if args.mode == 'train': - os.system("python -m " - "torch.distributed.launch " - "--nproc_per_node={} " - "tools/train.py " - "--cfg {}".format(cfg.NUM_GPU, args.cfg)) - elif args.mode == 'retrain': - os.system("python -m " - "torch.distributed.launch " - "--nproc_per_node={} " - "tools/retrain.py " - "--cfg {}".format(cfg.NUM_GPU, args.cfg)) - elif args.mode == 'test': - os.system("python -m " - "torch.distributed.launch " - "--nproc_per_node={} " - "tools/test.py " - "--cfg {}".format(cfg.NUM_GPU, args.cfg)) - else: - raise ValueError('Mode not supported yet!') - - -if __name__ == '__main__': - main() diff --git a/examples/nas/cream/tools/retrain.py b/examples/nas/cream/tools/retrain.py deleted file mode 100755 index 78c287ba13..0000000000 --- a/examples/nas/cream/tools/retrain.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import os -import warnings -import datetime -import torch -import numpy as np -import torch.nn as nn -import _init_paths - -from torchscope import scope -from torch.utils.tensorboard import SummaryWriter - -# import timm packages -from timm.optim import create_optimizer -from timm.models import resume_checkpoint -from timm.scheduler import create_scheduler -from timm.data import Dataset, create_loader -from timm.utils import ModelEma, update_summary -from timm.loss import LabelSmoothingCrossEntropy - -# import apex as distributed package -try: - from apex import amp - from apex.parallel import DistributedDataParallel as DDP - from apex.parallel import convert_syncbn_model - HAS_APEX = True -except ImportError: - from torch.nn.parallel import DistributedDataParallel as DDP - HAS_APEX = False - -# import models and training functions -from lib.core.test import validate -from lib.core.retrain import train_epoch -from lib.models.structures.childnet import gen_childnet -from lib.utils.util import parse_config_args, get_logger, get_model_flops_params -from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD - - -def main(): - args, cfg = parse_config_args('child net training') - - # resolve logging - output_dir = os.path.join(cfg.SAVE_PATH, - "{}-{}".format(datetime.date.today().strftime('%m%d'), - cfg.MODEL)) - - if args.local_rank == 0: - logger = get_logger(os.path.join(output_dir, 'retrain.log')) - writer = SummaryWriter(os.path.join(output_dir, 'runs')) - else: - writer, logger = None, None - - # retrain model selection - if cfg.NET.SELECTION == 470: - arch_list = [ - [0], [ - 3, 4, 3, 1], [ - 3, 2, 3, 0], [ - 3, 3, 3, 1], [ 
- 3, 3, 3, 3], [ - 3, 3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 42: - arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 96 - elif cfg.NET.SELECTION == 14: - arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] - cfg.DATASET.IMAGE_SIZE = 64 - elif cfg.NET.SELECTION == 112: - arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 160 - elif cfg.NET.SELECTION == 285: - arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 600: - arch_list = [ - [0], [ - 3, 3, 2, 3, 3], [ - 3, 2, 3, 2, 3], [ - 3, 2, 3, 2, 3], [ - 3, 3, 2, 2, 3, 3], [ - 3, 3, 2, 3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - else: - raise ValueError("Model Retrain Selection is not Supported!") - - # define childnet architecture from arch_list - stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] - choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k3_s2_e6_c80_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s2_e6_c192_se0.25'] - arch_def = [[stem[0]]] + [[choice_block_pool[idx] - for repeat_times in range(len(arch_list[idx + 1]))] - for idx in range(len(choice_block_pool))] + [[stem[1]]] - - # generate childnet - model = gen_childnet( - arch_list, - arch_def, - num_classes=cfg.DATASET.NUM_CLASSES, - drop_rate=cfg.NET.DROPOUT_RATE, - global_pool=cfg.NET.GP) - - # initialize training parameters - eval_metric = cfg.EVAL_METRICS - best_metric, best_epoch, saver = None, None, None - - # initialize distributed parameters - distributed = cfg.NUM_GPU > 1 - torch.cuda.set_device(args.local_rank) - torch.distributed.init_process_group(backend='nccl', init_method='env://') - if args.local_rank == 0: - logger.info( - 'Training on Process {} with {} GPUs.'.format( - args.local_rank, cfg.NUM_GPU)) - - # fix random seeds - torch.manual_seed(cfg.SEED) - torch.cuda.manual_seed_all(cfg.SEED) - np.random.seed(cfg.SEED) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - # get parameters and FLOPs of model - if args.local_rank == 0: - macs, params = get_model_flops_params(model, input_size=( - 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) - logger.info( - '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) - - # create optimizer - optimizer = create_optimizer(cfg, model) - model = model.cuda() - - # optionally resume from a checkpoint - resume_state, resume_epoch = {}, None - if cfg.AUTO_RESUME: - resume_state, resume_epoch = resume_checkpoint(model, cfg.RESUME_PATH) - optimizer.load_state_dict(resume_state['optimizer']) - del resume_state - - model_ema = None - if cfg.NET.EMA.USE: - model_ema = ModelEma( - model, - decay=cfg.NET.EMA.DECAY, - device='cpu' if cfg.NET.EMA.FORCE_CPU else '', - resume=cfg.RESUME_PATH if cfg.AUTO_RESUME else None) - - if distributed: - if cfg.BATCHNORM.SYNC_BN: - try: - if HAS_APEX: - model = convert_syncbn_model(model) - else: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm( - model) - if args.local_rank == 0: - logger.info( - 'Converted model to use Synchronized BatchNorm.') - except Exception as e: - if args.local_rank == 0: - logger.error( - 'Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1 with exception {}'.format(e)) - if HAS_APEX: - model = DDP(model, delay_allreduce=True) - else: - if args.local_rank == 0: - logger.info( - "Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") - # can use device str in Torch >= 1.1 - model = DDP(model, device_ids=[args.local_rank]) - - # imagenet train dataset - train_dir = os.path.join(cfg.DATA_DIR, 'train') - if not os.path.exists(train_dir) and args.local_rank == 0: - logger.error('Training folder does not exist at: {}'.format(train_dir)) - exit(1) - dataset_train = Dataset(train_dir) - loader_train = create_loader( - dataset_train, - input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), - batch_size=cfg.DATASET.BATCH_SIZE, - is_training=True, - color_jitter=cfg.AUGMENTATION.COLOR_JITTER, - auto_augment=cfg.AUGMENTATION.AA, - num_aug_splits=0, - crop_pct=DEFAULT_CROP_PCT, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - num_workers=cfg.WORKERS, - distributed=distributed, - collate_fn=None, - pin_memory=cfg.DATASET.PIN_MEM, - interpolation='random', - re_mode=cfg.AUGMENTATION.RE_MODE, - re_prob=cfg.AUGMENTATION.RE_PROB - ) - - # imagenet validation dataset - eval_dir = os.path.join(cfg.DATA_DIR, 'val') - if not os.path.exists(eval_dir) and args.local_rank == 0: - logger.error( - 'Validation folder does not exist at: {}'.format(eval_dir)) - exit(1) - dataset_eval = Dataset(eval_dir) - loader_eval = create_loader( - dataset_eval, - input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), - batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, - is_training=False, - interpolation=cfg.DATASET.INTERPOLATION, - crop_pct=DEFAULT_CROP_PCT, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - num_workers=cfg.WORKERS, - distributed=distributed, - pin_memory=cfg.DATASET.PIN_MEM - ) - - # whether to use label smoothing - if cfg.AUGMENTATION.SMOOTHING > 0.: - train_loss_fn = LabelSmoothingCrossEntropy( - smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() - validate_loss_fn = nn.CrossEntropyLoss().cuda() - else: - train_loss_fn = nn.CrossEntropyLoss().cuda() - validate_loss_fn = train_loss_fn - - # create learning rate scheduler - lr_scheduler, num_epochs = create_scheduler(cfg, optimizer) - start_epoch = resume_epoch if resume_epoch is not None else 0 - if start_epoch > 0: - lr_scheduler.step(start_epoch) - if args.local_rank == 0: - logger.info('Scheduled epochs: {}'.format(num_epochs)) - - try: - best_record, best_ep = 0, 0 - for epoch in range(start_epoch, num_epochs): - if distributed: - loader_train.sampler.set_epoch(epoch) - - train_metrics = train_epoch( - epoch, - model, - loader_train, - optimizer, - train_loss_fn, - cfg, - lr_scheduler=lr_scheduler, - saver=saver, - output_dir=output_dir, - model_ema=model_ema, - logger=logger, - writer=writer, - local_rank=args.local_rank) - - eval_metrics = validate( - epoch, - model, - loader_eval, - validate_loss_fn, - cfg, - logger=logger, - writer=writer, - local_rank=args.local_rank) - - if model_ema is not None and not cfg.NET.EMA.FORCE_CPU: - ema_eval_metrics = validate( - epoch, - model_ema.ema, - loader_eval, - validate_loss_fn, - cfg, - log_suffix='_EMA', - logger=logger, - writer=writer) - eval_metrics = ema_eval_metrics - - if lr_scheduler is not None: - lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) - - update_summary(epoch, train_metrics, eval_metrics, os.path.join( - output_dir, 'summary.csv'), write_header=best_metric is None) - - if saver is not None: - # save proper checkpoint with eval metric - save_metric = eval_metrics[eval_metric] - best_metric, best_epoch = saver.save_checkpoint( - model, optimizer, cfg, - epoch=epoch, model_ema=model_ema, metric=save_metric) - - if best_record < 
eval_metrics[eval_metric]: - best_record = eval_metrics[eval_metric] - best_ep = epoch - - if args.local_rank == 0: - logger.info( - '*** Best metric: {0} (epoch {1})'.format(best_record, best_ep)) - - except KeyboardInterrupt: - pass - - if best_metric is not None: - logger.info( - '*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) - - -if __name__ == '__main__': - main() diff --git a/examples/nas/cream/tools/test.py b/examples/nas/cream/tools/test.py deleted file mode 100755 index f09c71f3ef..0000000000 --- a/examples/nas/cream/tools/test.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# Written by Hao Du and Houwen Peng -# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com - -import os -import warnings -import datetime -import torch -import torch.nn as nn -import _init_paths - -from torch.utils.tensorboard import SummaryWriter - -# import timm packages -from timm.utils import ModelEma -from timm.models import resume_checkpoint -from timm.data import Dataset, create_loader - -# import apex as distributed package -try: - from apex.parallel import convert_syncbn_model - from apex.parallel import DistributedDataParallel as DDP - HAS_APEX = True -except ImportError: - from torch.nn.parallel import DistributedDataParallel as DDP - HAS_APEX = False - -# import models and training functions -from lib.core.test import validate -from lib.models.structures.childnet import gen_childnet -from lib.utils.util import parse_config_args, get_logger, get_model_flops_params -from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD - - -def main(): - args, cfg = parse_config_args('child net testing') - - # resolve logging - output_dir = os.path.join(cfg.SAVE_PATH, - "{}-{}".format(datetime.date.today().strftime('%m%d'), - cfg.MODEL)) - - if args.local_rank == 0: - logger = get_logger(os.path.join(output_dir, 'test.log')) - writer = SummaryWriter(os.path.join(output_dir, 'runs')) - else: - writer, logger = None, None - - # retrain model selection - if cfg.NET.SELECTION == 470: - arch_list = [ - [0], [ - 3, 4, 3, 1], [ - 3, 2, 3, 0], [ - 3, 3, 3, 1], [ - 3, 3, 3, 3], [ - 3, 3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 42: - arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 96 - elif cfg.NET.SELECTION == 14: - arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] - cfg.DATASET.IMAGE_SIZE = 64 - elif cfg.NET.SELECTION == 112: - arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 160 - elif cfg.NET.SELECTION == 285: - arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 600: - arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], - [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]] - cfg.DATASET.IMAGE_SIZE = 224 - else: - raise ValueError("Model Test Selection is not Supported!") - - # define childnet architecture from arch_list - stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] - choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', - 'ir_r1_k5_s2_e4_c40_se0.25', - 'ir_r1_k3_s2_e6_c80_se0.25', - 'ir_r1_k3_s1_e6_c96_se0.25', - 'ir_r1_k3_s2_e6_c192_se0.25'] - arch_def = [[stem[0]]] + [[choice_block_pool[idx] - for repeat_times in range(len(arch_list[idx + 1]))] - for idx in range(len(choice_block_pool))] + [[stem[1]]] - - # generate childnet - model = gen_childnet( - arch_list, - arch_def, - 
num_classes=cfg.DATASET.NUM_CLASSES, - drop_rate=cfg.NET.DROPOUT_RATE, - global_pool=cfg.NET.GP) - - if args.local_rank == 0: - macs, params = get_model_flops_params(model, input_size=( - 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) - logger.info( - '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) - - # initialize distributed parameters - torch.cuda.set_device(args.local_rank) - torch.distributed.init_process_group(backend='nccl', init_method='env://') - if args.local_rank == 0: - logger.info( - "Training on Process {} with {} GPUs.".format( - args.local_rank, cfg.NUM_GPU)) - - # resume model from checkpoint - assert cfg.AUTO_RESUME is True and os.path.exists(cfg.RESUME_PATH) - _, __ = resume_checkpoint(model, cfg.RESUME_PATH) - - model = model.cuda() - - model_ema = None - if cfg.NET.EMA.USE: - # Important to create EMA model after cuda(), DP wrapper, and AMP but - # before SyncBN and DDP wrapper - model_ema = ModelEma( - model, - decay=cfg.NET.EMA.DECAY, - device='cpu' if cfg.NET.EMA.FORCE_CPU else '', - resume=cfg.RESUME_PATH) - - # imagenet validation dataset - eval_dir = os.path.join(cfg.DATA_DIR, 'val') - if not os.path.exists(eval_dir) and args.local_rank == 0: - logger.error( - 'Validation folder does not exist at: {}'.format(eval_dir)) - exit(1) - - dataset_eval = Dataset(eval_dir) - loader_eval = create_loader( - dataset_eval, - input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), - batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, - is_training=False, - num_workers=cfg.WORKERS, - distributed=True, - pin_memory=cfg.DATASET.PIN_MEM, - crop_pct=DEFAULT_CROP_PCT, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD - ) - - # only test accuracy of model-EMA - validate_loss_fn = nn.CrossEntropyLoss().cuda() - validate(0, model_ema.ema, loader_eval, validate_loss_fn, cfg, - log_suffix='_EMA', logger=logger, - writer=writer, local_rank=args.local_rank) - - -if __name__ == '__main__': - main() diff --git a/examples/nas/cream/tools/train.py b/examples/nas/cream/train.py similarity index 76% rename from examples/nas/cream/tools/train.py rename to examples/nas/cream/train.py index c82306e741..2a176bade8 100644 --- a/examples/nas/cream/tools/train.py +++ b/examples/nas/cream/train.py @@ -29,7 +29,6 @@ # import models and training functions from lib.utils.flops_table import FlopsEst -from lib.core.train import train_epoch, validate from lib.models.structures.supernet import gen_supernet from lib.models.PrioritizedBoard import PrioritizedBoard from lib.models.MetaMatchingNetwork import MetaMatchingNetwork @@ -37,14 +36,20 @@ from lib.utils.util import parse_config_args, get_logger, \ create_optimizer_supernet, create_supernet_scheduler +from nni.nas.pytorch.callbacks import LRSchedulerCallback +from nni.nas.pytorch.callbacks import ModelCheckpoint +from nni.nas.pytorch.cream import CreamSupernetTrainer +from nni.nas.pytorch.random import RandomMutator def main(): - args, cfg = parse_config_args('super net training') + args, cfg = parse_config_args('nni.cream.supernet') # resolve logging output_dir = os.path.join(cfg.SAVE_PATH, "{}-{}".format(datetime.date.today().strftime('%m%d'), cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) if args.local_rank == 0: logger = get_logger(os.path.join(output_dir, "train.log")) @@ -79,9 +84,6 @@ def main(): verbose=cfg.VERBOSE, logger=logger) - # initialize meta matching networks - MetaMN = MetaMatchingNetwork(cfg) - # number of choice blocks in supernet choice_num = 
len(model.blocks[1][0]) if args.local_rank == 0: @@ -90,12 +92,9 @@ def main(): logger.info('resolution: %d', (resolution)) logger.info('choice number: %d', (choice_num)) - # initialize prioritized board - prioritized_board = PrioritizedBoard( - cfg, CHOICE_NUM=choice_num, sta_num=sta_num) - # initialize flops look-up table model_est = FlopsEst(model) + flops_dict, flops_fixed = model_est.flops_dict, model_est.flops_fixed # optionally resume from a checkpoint optimizer_state = None @@ -196,54 +195,23 @@ def main(): train_loss_fn = nn.CrossEntropyLoss().cuda() validate_loss_fn = train_loss_fn - # initialize training parameters - eval_metric = cfg.EVAL_METRICS - best_metric, best_epoch, saver, best_children_pool = None, None, None, [] - if args.local_rank == 0: - decreasing = True if eval_metric == 'loss' else False - saver = CheckpointSaver( - checkpoint_dir=output_dir, - decreasing=decreasing) - - # training scheme - try: - for epoch in range(start_epoch, num_epochs): - loader_train.sampler.set_epoch(epoch) - - # train one epoch - train_metrics = train_epoch( - epoch, - model, - loader_train, - optimizer, - train_loss_fn, - prioritized_board, - MetaMN, - cfg, - lr_scheduler=lr_scheduler, - saver=saver, - output_dir=output_dir, - logger=logger, - est=model_est, - local_rank=args.local_rank) - - # evaluate one epoch - eval_metrics = validate(model, loader_eval, validate_loss_fn, - prioritized_board, MetaMN, cfg, - local_rank=args.local_rank, logger=logger) - - update_summary(epoch, train_metrics, eval_metrics, os.path.join( - output_dir, 'summary.csv'), write_header=best_metric is None) - - if saver is not None: - # save proper checkpoint with eval metric - save_metric = eval_metrics[eval_metric] - best_metric, best_epoch = saver.save_checkpoint( - model, optimizer, cfg, - epoch=epoch, metric=save_metric) - - except KeyboardInterrupt: - pass + mutator = RandomMutator(model) + + trainer = CreamSupernetTrainer(model, train_loss_fn, validate_loss_fn, + optimizer, num_epochs, loader_train, loader_eval, + mutator=mutator, batch_size=cfg.DATASET.BATCH_SIZE, + log_frequency=cfg.LOG_FREQUENCY, + meta_sta_epoch=cfg.SUPERNET.META_STA_EPOCH, + update_iter=cfg.SUPERNET.UPDATE_ITER, + slices=cfg.SUPERNET.SLICE, + pool_size=cfg.SUPERNET.POOL_SIZE, + pick_method=cfg.SUPERNET.PICK_METHOD, + choice_num=choice_num, sta_num=sta_num, acc_gap=cfg.ACC_GAP, + flops_dict=flops_dict, flops_fixed=flops_fixed, local_rank=args.local_rank, + callbacks=[LRSchedulerCallback(lr_scheduler), + ModelCheckpoint(output_dir)]) + + trainer.train() if __name__ == '__main__': diff --git a/examples/nas/cream/train.yaml b/examples/nas/cream/train.yaml new file mode 100644 index 0000000000..2545cd5959 --- /dev/null +++ b/examples/nas/cream/train.yaml @@ -0,0 +1,53 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: 'Supernet_Training' +RESUME_PATH: './experiments/workspace/train/resume.pth.tar' +SAVE_PATH: './checkpoints' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 8 +NUM_GPU: 8 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'bilinear' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + +NET: + GP: 'avg' + DROPOUT_RATE: 0.0 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9998 + +OPT: 'sgd' +LR: 1.0 +EPOCHS: 120 +META_LR: 1e-4 + +BATCHNORM: + SYNC_BN: False + +SUPERNET: + UPDATE_ITER: 200 + SLICE: 4 + POOL_SIZE: 10 + RESUNIT: 
False + DIL_CONV: False + UPDATE_2ND: True + FLOPS_MINIMUM: 0 + FLOPS_MAXIMUM: 600 + PICK_METHOD: 'meta' + META_STA_EPOCH: 20 + HOW_TO_PROB: 'pre_prob' + PRE_PROB: (0.05,0.2,0.05,0.5,0.05,0.15) \ No newline at end of file diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py index a245d91b44..a4fbc1c106 100644 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -2,21 +2,21 @@ # Licensed under the MIT license. import os +import torch import logging + from copy import deepcopy -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.distributed as dist from nni.nas.pytorch.trainer import Trainer from nni.nas.pytorch.utils import AverageMeterGroup +from .utils import accuracy, reduce_metrics + logger = logging.getLogger(__name__) class CreamSupernetTrainer(Trainer): """ - This trainer trains a supernet that can be used for evolution search. + This trainer trains a supernet and output prioritized architectures that can be used for other tasks. Parameters ---------- @@ -24,6 +24,8 @@ class CreamSupernetTrainer(Trainer): Model with mutables. loss : callable Called with logits and targets. Returns a loss tensor. + val_loss : callable + Called with logits and targets for validation only. Returns a loss tensor. optimizer : Optimizer Optimizer that optimizes the model. num_epochs : int @@ -38,266 +40,347 @@ class CreamSupernetTrainer(Trainer): Batch size. log_frequency : int Number of mini-batches to log metrics. - est : object - look-up table of flops and parameters meta_sta_epoch : int - starting epoch of using meta picking + start epoch of using meta matching network to pick teacher architecture update_iter : int - interval of updating meta networks + interval of updating meta matching networks slices : int - batch size of mini slices + batch size of mini training data in the process of training meta matching network pool_size : int board size pick_method : basestring how to pick teacher network - lr_scheduler : scheduler - Learning rate scheduler - distributed : bool - whether to use distributed training + choice_num : int + number of operations in supernet + sta_num : int + layer number of each stage in supernet (5 stage in supernet) + acc_gap : int + maximum accuracy improvement to omit the limitation of flops + flops_dict : Dict + dictionary of each layer's operations in supernet + flops_fixed : int + flops of fixed part in supernet local_rank : int index of current rank - val_loss : callable - calculate validation loss + callbacks : list of Callback + Callbacks to plug into the trainer. See Callbacks. 
""" - def __init__(self, model, loss, + + def __init__(self, model, loss, val_loss, optimizer, num_epochs, train_loader, valid_loader, mutator=None, batch_size=64, log_frequency=None, - est=None, meta_sta_epoch=20, update_iter=200, slices=2, pool_size=10, - pick_method='meta', lr_scheduler=None, distributed=True, local_rank=0, val_loss=None): + meta_sta_epoch=20, update_iter=200, slices=2, + pool_size=10, pick_method='meta', choice_num=6, + sta_num=(4, 4, 4, 4, 4), acc_gap=5, + flops_dict=None, flops_fixed=0, local_rank=0, callbacks=None): assert torch.cuda.is_available() - super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None, optimizer, num_epochs, - train_loader, valid_loader, batch_size, 8, - 'cuda', log_frequency, None) + super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None, + optimizer, num_epochs, None, None, + batch_size, None, None, log_frequency, callbacks) + self.model = model + self.loss = loss + self.val_loss = val_loss self.train_loader = train_loader self.valid_loader = valid_loader self.log_frequency = log_frequency self.batch_size = batch_size - self.mutator = mutator self.optimizer = optimizer self.model = model self.loss = loss - self.est = est - self.best_children_pool = [] self.num_epochs = num_epochs self.meta_sta_epoch = meta_sta_epoch self.update_iter = update_iter self.slices = slices self.pick_method = pick_method self.pool_size = pool_size - self.main_proc = not distributed or local_rank == 0 - self.distributed = distributed - self.val_loss = val_loss - self.lr_scheduler = lr_scheduler - self.callbacks = [] - self.arch_dict = dict() - - def cross_entropy_loss_with_soft_target(self, pred, soft_target): - logsoftmax = nn.LogSoftmax() - return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1)) - - def reduce_tensor(self, tensor): - rt = tensor.clone() - dist.all_reduce(rt, op=dist.ReduceOp.SUM) - rt /= float(os.environ["WORLD_SIZE"]) - return rt - - def reduce_metrics(self, metrics, distributed=False): - if distributed: - return {k: self.reduce_tensor(v).item() for k, v in metrics.items()} - return {k: v.item() for k, v in metrics.items()} - - def accuracy(self, output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - maxk = max(topk) - batch_size = target.size(0) - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - return [correct[:k].view(-1).float().sum(0) * 100. 
/ batch_size for k in topk] + self.local_rank = local_rank + self.main_proc = (local_rank == 0) + self.choice_num = choice_num + self.sta_num = sta_num + self.acc_gap = acc_gap + self.flops_dict = flops_dict + self.flops_fixed = flops_fixed + + self.current_student_arch = None + self.current_teacher_arch = None + + self.prioritized_board = [] + + # size of prioritized board + def _board_size(self): + return len(self.prioritized_board) + + # select teacher architecture according to the logit difference + def _select_teacher(self): + self._replace_mutator_cand(self.current_student_arch) + + if self.pick_method == 'top1': + meta_value, teacher_cand = 0.5, sorted( + self.prioritized_board, reverse=True)[0][3] + elif self.pick_method == 'meta': + meta_value, cand_idx, teacher_cand = -1000000000, -1, None + for now_idx, item in enumerate(self.prioritized_board): + inputx = item[4] + output = torch.nn.functional.softmax(self.model(inputx), dim=1) + weight = self.model.module.forward_meta(output - item[5]) + if weight > meta_value: + meta_value = weight + cand_idx = now_idx + teacher_cand = self.prioritized_board[cand_idx][3] + assert teacher_cand is not None + meta_value = torch.nn.functional.sigmoid(-weight) + else: + raise ValueError('Method Not supported') + + return meta_value, teacher_cand + + # check whether to update prioritized board + def _isUpdateBoard(self, prec1, flops): + if self.current_epoch <= self.meta_sta_epoch: + return False + + if len(self.prioritized_board) < self.pool_size: + return True + + if prec1 > self.prioritized_board[-1][1] + self.acc_gap: + return True + + if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]: + return True + + return False + + # update prioritized board + def _update_prioritized_board(self, inputs, teacher_output, outputs, prec1, flops): + if self._isUpdateBoard(prec1, flops): + val_prec1 = prec1 + training_data = deepcopy(inputs[:self.slices].detach()) + if len(self.prioritized_board) == 0: + features = deepcopy(outputs[:self.slices].detach()) + else: + features = deepcopy( + teacher_output[:self.slices].detach()) + self.prioritized_board.append( + (val_prec1, + prec1, + flops, + self.current_teacher_arch, + training_data, + torch.nn.functional.softmax( + features, + dim=1))) + self.prioritized_board = sorted( + self.prioritized_board, reverse=True) + + if len(self.prioritized_board) > self.pool_size: + self.prioritized_board = sorted( + self.prioritized_board, reverse=True) + del self.prioritized_board[-1] + + # only update student network weights + def _update_student_weights_only(self, grad_1): + for weight, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1): + weight.grad = grad_item + torch.nn.utils.clip_grad_norm_( + self.model.module.rand_parameters(self.current_student_arch), 1) + self.optimizer.step() + for weight, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1): + del weight.grad + + # only update meta networks weights + def _update_meta_weights_only(self, teacher_cand, grad_teacher): + for weight, grad_item in zip(self.model.module.rand_parameters( + teacher_cand, self.pick_method == 'meta'), grad_teacher): + weight.grad = grad_item + + # clip gradients + torch.nn.utils.clip_grad_norm_( + self.model.module.rand_parameters( + self.current_student_arch, self.pick_method == 'meta'), 1) + + self.optimizer.step() + for weight, grad_item in zip(self.model.module.rand_parameters( + teacher_cand, self.pick_method == 'meta'), 
grad_teacher): + del weight.grad + + # simulate sgd updating + def _simulate_sgd_update(self, w, g, optimizer): + return g * optimizer.param_groups[-1]['lr'] + w + + # split training images into several slices + def _get_minibatch_input(self, input): + slice = self.slices + x = deepcopy(input[:slice].clone().detach()) + return x + + # calculate 1st gradient of student architectures + def _calculate_1st_gradient(self, kd_loss): + self.optimizer.zero_grad() + grad = torch.autograd.grad( + kd_loss, + self.model.module.rand_parameters(self.current_student_arch), + create_graph=True) + return grad + + # calculate 2nd gradient of meta networks + def _calculate_2nd_gradient(self, validation_loss, teacher_cand, students_weight): + self.optimizer.zero_grad() + grad_student_val = torch.autograd.grad( + validation_loss, + self.model.module.rand_parameters(self.random_cand), + retain_graph=True) + + grad_teacher = torch.autograd.grad( + students_weight[0], + self.model.module.rand_parameters( + teacher_cand, + self.pick_method == 'meta'), + grad_outputs=grad_student_val) + return grad_teacher + + # forward training data + def _forward_training(self, x, meta_value): + self._replace_mutator_cand(self.current_student_arch) + output = self.model(x) - def train_one_epoch(self, epoch): - def get_model(model): - return model.module + with torch.no_grad(): + self._replace_mutator_cand(self.current_teacher_arch) + teacher_output = self.model(x) + soft_label = torch.nn.functional.softmax(teacher_output, dim=1) + + kd_loss = meta_value * \ + self._cross_entropy_loss_with_soft_target(output, soft_label) + return kd_loss + + # calculate soft target loss + def _cross_entropy_loss_with_soft_target(self, pred, soft_target): + logsoftmax = torch.nn.LogSoftmax() + return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1)) + + # forward validation data + def _forward_validation(self, input, target): + slice = self.slices + x = input[slice:slice * 2].clone() + + self._replace_mutator_cand(self.current_student_arch) + output_2 = self.model(x) + + validation_loss = self.loss(output_2, target[slice:slice * 2]) + return validation_loss + + def _isUpdateMeta(self, batch_idx): + isUpdate = True + isUpdate &= (self.current_epoch > self.meta_sta_epoch) + isUpdate &= (batch_idx > 0) + isUpdate &= (batch_idx % self.update_iter == 0) + isUpdate &= (self._board_size() > 0) + return isUpdate + + def _replace_mutator_cand(self, cand): + self.mutator._cache = cand + + # update meta matching networks + def _run_update(self, input, target, batch_idx): + if self._isUpdateMeta(batch_idx): + x = self._get_minibatch_input(input) + + meta_value, teacher_cand = self._select_teacher() + + kd_loss = self._forward_training(x, meta_value) + # calculate 1st gradient + grad_1st = self._calculate_1st_gradient(kd_loss) + + # simulate updated student weights + students_weight = [ + self._simulate_sgd_update( + p, grad_item, self.optimizer) for p, grad_item in zip( + self.model.module.rand_parameters(self.current_student_arch), grad_1st)] + + # update student weights + self._update_student_weights_only(grad_1st) + + validation_loss = self._forward_validation(input, target) + + # calculate 2nd gradient + grad_teacher = self._calculate_2nd_gradient(validation_loss, teacher_cand, students_weight) + + # update meta matching networks + self._update_meta_weights_only(teacher_cand, grad_teacher) + + # delete internal variants + del grad_teacher, grad_1st, x, validation_loss, kd_loss, students_weight + + def _get_cand_flops(self, cand): + flops = 0 + for 
block_id, block in enumerate(cand): + for module_id, choice in enumerate(block): + if choice == -1: + continue + flops += self.flops_dict[block_id][module_id][choice] + return flops + self.flops_fixed + + def train_one_epoch(self, epoch): meters = AverageMeterGroup() for step, (input_data, target) in enumerate(self.train_loader): - self.optimizer.zero_grad() self.mutator.reset() + self.current_student_arch = self.mutator._cache - input_data = input_data.cuda() - target = target.cuda() - - cand_flops = self.est.get_flops(self.mutator._cache) - - if epoch > self.meta_sta_epoch and step > 0 and step % self.update_iter == 0: - - slice_ind = self.slices - x = deepcopy(input_data[:slice_ind].clone().detach()) - - if self.best_children_pool: - if self.pick_method == 'top1': - meta_value, cand = 1, sorted(self.best_children_pool, reverse=True)[0][3] - elif self.pick_method == 'meta': - meta_value, cand_idx, cand = -1000000000, -1, None - for now_idx, item in enumerate(self.best_children_pool): - inputx = item['input'] - output = F.softmax(self.model(inputx), dim=1) - weight = get_model(self.model).forward_meta(output - item['feature_map']) - if weight > meta_value: - meta_value = weight # deepcopy(torch.nn.functional.sigmoid(weight)) - cand_idx = now_idx - cand = self.arch_dict[(self.best_children_pool[cand_idx]['acc'], - self.best_children_pool[cand_idx]['arch_list'])] - assert cand is not None - meta_value = torch.nn.functional.sigmoid(-weight) - else: - raise ValueError('Method Not supported') - - u_output = self.model(x) - - saved_cache = self.mutator._cache - self.mutator._cache = cand - u_teacher_output = self.model(x) - self.mutator._cache = saved_cache - - u_soft_label = F.softmax(u_teacher_output, dim=1) - kd_loss = meta_value * self.cross_entropy_loss_with_soft_target(u_output, u_soft_label) - self.optimizer.zero_grad() - - grad_1 = torch.autograd.grad(kd_loss, - get_model(self.model).rand_parameters(self.mutator._cache), - create_graph=True) - - def raw_sgd(w, g): - return g * self.optimizer.param_groups[-1]['lr'] + w - - students_weight = [raw_sgd(p, grad_item) - for p, grad_item in - zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1)] - - # update student weights - for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): - weight.grad = grad_item - torch.nn.utils.clip_grad_norm_(get_model(self.model).rand_parameters(self.mutator._cache), 1) - self.optimizer.step() - for weight, grad_item in zip(get_model(self.model).rand_parameters(self.mutator._cache), grad_1): - del weight.grad - - held_out_x = deepcopy(input_data[slice_ind:slice_ind * 2].clone().detach()) - output_2 = self.model(held_out_x) - valid_loss = self.loss(output_2, target[slice_ind:slice_ind * 2]) - self.optimizer.zero_grad() - - grad_student_val = torch.autograd.grad(valid_loss, - get_model(self.model).rand_parameters(self.mutator._cache), - retain_graph=True) - - grad_teacher = torch.autograd.grad(students_weight[0], - get_model(self.model).rand_parameters(cand, - self.pick_method == 'meta'), - grad_outputs=grad_student_val) - - # update teacher model - for weight, grad_item in zip( - get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), - grad_teacher): - weight.grad = grad_item - torch.nn.utils.clip_grad_norm_( - get_model(self.model).rand_parameters(self.mutator._cache, self.pick_method == 'meta'), 1) - self.optimizer.step() - for weight, grad_item in zip( - get_model(self.model).rand_parameters(cand, self.pick_method == 'meta'), - 
grad_teacher): - del weight.grad - - for item in students_weight: - del item - del grad_teacher, grad_1, grad_student_val, x, held_out_x - del valid_loss, kd_loss, u_soft_label, u_output, u_teacher_output, output_2 - - else: - raise ValueError("Must 1nd or 2nd update teacher weights") - - # get_best_teacher - if self.best_children_pool: - if self.pick_method == 'top1': - meta_value, cand = 0.5, sorted(self.best_children_pool, reverse=True)[0][3] - elif self.pick_method == 'meta': - meta_value, cand_idx, cand = -1000000000, -1, None - for now_idx, item in enumerate(self.best_children_pool): - inputx = item['input'] - output = F.softmax(self.model(inputx), dim=1) - weight = get_model(self.model).forward_meta(output - item['feature_map']) - if weight > meta_value: - meta_value = weight - cand_idx = now_idx - cand = self.arch_dict[(self.best_children_pool[cand_idx]['acc'], - self.best_children_pool[cand_idx]['arch_list'])] - assert cand is not None - meta_value = torch.nn.functional.sigmoid(-weight) - else: - raise ValueError('Method Not supported') - if not self.best_children_pool: - output = self.model(input_data) - loss = self.loss(output, target) - kd_loss = loss - elif epoch <= self.meta_sta_epoch: + input_data, target = input_data.cuda(), target.cuda() + + # calculate flops of current architecture + cand_flops = self._get_cand_flops(self.mutator._cache) + + # update meta matching network + self._run_update(input_data, target, step) + + # select teacher architecture + meta_value, teacher_cand = self._select_teacher() + self.current_teacher_arch = teacher_cand + + # forward supernet + if self._board_size() == 0 or epoch <= self.meta_sta_epoch: + self._replace_mutator_cand(self.current_student_arch) output = self.model(input_data) + loss = self.loss(output, target) else: + self._replace_mutator_cand(self.current_student_arch) output = self.model(input_data) - with torch.no_grad(): - # save student arch - saved_cache = self.mutator._cache - self.mutator._cache = cand - # forward + gt_loss = self.loss(output, target) + + with torch.no_grad(): + self._replace_mutator_cand(self.current_teacher_arch) teacher_output = self.model(input_data).detach() - # restore student arch - self.mutator._cache = saved_cache - soft_label = F.softmax(teacher_output, dim=1) - kd_loss = self.cross_entropy_loss_with_soft_target(output, soft_label) - valid_loss = self.loss(output, target) - loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2 + soft_label = torch.nn.functional.softmax(teacher_output, dim=1) + kd_loss = self._cross_entropy_loss_with_soft_target(output, soft_label) + + loss = (meta_value * kd_loss + (2 - meta_value) * gt_loss) / 2 + # update network self.optimizer.zero_grad() loss.backward() self.optimizer.step() - prec1, prec5 = self.accuracy(output, target, topk=(1, 5)) + # update metrics + prec1, prec5 = accuracy(output, target, topk=(1, 5)) metrics = {"prec1": prec1, "prec5": prec5, "loss": loss} - metrics = self.reduce_metrics(metrics, self.distributed) + metrics = reduce_metrics(metrics) meters.update(metrics) - if epoch > self.meta_sta_epoch and ( - (len(self.best_children_pool) < self.pool_size) or (prec1 > self.best_children_pool[-1]['acc'] + 5) or - (prec1 > self.best_children_pool[-1]['acc'] and cand_flops < self.best_children_pool[-1]['flops'])): - val_prec1 = prec1 - training_data = deepcopy(input_data[:self.slices].detach()) - if not self.best_children_pool: - features = deepcopy(output[:self.slices].detach()) - else: - features = 
deepcopy(teacher_output[:self.slices].detach()) - self.best_children_pool.append( - {'acc': val_prec1, 'accu': prec1, 'flops': cand_flops, 'input': training_data, - 'feature_map': F.softmax(features, dim=1)}) - self.arch_dict[(val_prec1, cand_flops)] = self.mutator._cache - self.best_children_pool = sorted(self.best_children_pool, key=lambda x: x['acc'], reverse=True) - - if len(self.best_children_pool) > self.pool_size: - self.best_children_pool = sorted(self.best_children_pool, key=lambda x: x['acc'], reverse=True) - del self.best_children_pool[-1] - - if self.lr_scheduler is not None: - self.lr_scheduler.step() - - if self.main_proc and self.log_frequency is not None and step % self.log_frequency == 0: - logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1, - self.num_epochs, step + 1, len(self.train_loader), meters) - - if self.main_proc: + # update prioritized board + self._update_prioritized_board(input_data, teacher_output, output, metrics['top1'], cand_flops) + + if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): + self.logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs, + step + 1, len(self.train_loader), meters) + + if self.main_proc and self.num_epochs == epoch + 1: for idx, i in enumerate(self.best_children_pool): logger.info("No.%s %s", idx, i[:4]) diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/utils.py b/src/sdk/pynni/nni/nas/pytorch/cream/utils.py new file mode 100644 index 0000000000..b72f1d8d75 --- /dev/null +++ b/src/sdk/pynni/nni/nas/pytorch/cream/utils.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + + +import os +import torch.distributed as dist + +def accuracy(output, target, topk=(1,)): + """ Computes the precision@k for the specified values of k """ + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + # one-hot case + if target.ndimension() > 1: + target = target.max(1)[1] + + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(1.0 / batch_size)) + return res + +def reduce_metrics(metrics): + return {k: reduce_tensor(v).item() for k, v in metrics.items()} + +def reduce_tensor(tensor): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= float(os.environ["WORLD_SIZE"]) + return rt \ No newline at end of file From 37518fa2851351898237a45010906ce896802001 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 09:07:21 +0800 Subject: [PATCH 40/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 39c2a91372..95cd846573 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,4 +1,5 @@ -# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search +# Cream of the Crop: +# Distilling Prioritized Paths For One-Shot Neural Architecture Search ## Introduction One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. 
The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon). From e04200c73c08c4c44b957c726fe39d8fb3732144 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 09:25:27 +0800 Subject: [PATCH 41/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 95cd846573..4621d783b3 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,8 +1,19 @@ -# Cream of the Crop: -# Distilling Prioritized Paths For One-Shot Neural Architecture Search +# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search + + +# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search +This is an official implementation for our Cream NAS work presented in NeurIPS'20. + +**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
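For readers following the `CreamSupernetTrainer` diff above: the distillation signal it uses is a cross entropy against the teacher's softened outputs, blended with the ordinary label loss by the teacher-confidence scalar `meta_value`. A minimal, self-contained sketch of that loss in plain PyTorch (function names are illustrative):

```python
import torch
import torch.nn.functional as F

def soft_target_cross_entropy(student_logits, teacher_logits):
    # cross entropy of the student against the teacher's softened distribution
    soft_target = F.softmax(teacher_logits, dim=1)
    log_probs = F.log_softmax(student_logits, dim=1)
    return torch.mean(torch.sum(-soft_target * log_probs, dim=1))

def blended_loss(student_logits, teacher_logits, target, meta_value):
    # mirrors (meta_value * kd_loss + (2 - meta_value) * gt_loss) / 2 in train_one_epoch
    kd_loss = soft_target_cross_entropy(student_logits, teacher_logits)
    gt_loss = F.cross_entropy(student_logits, target)
    return (meta_value * kd_loss + (2 - meta_value) * gt_loss) / 2
```

With `meta_value` close to 1 the student leans on the prioritized-path teacher; with a small `meta_value` the plain label loss dominates.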
    + +In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent MobileNetV3 (Google, ICCV’19) and EfficientNet (Google, ICML’19) families under aligned settings. + +
    + +
    ## Introduction -One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon). +Neural Architecture Search (NAS) is an exciting field which facilitates the automatic design of deep networks. In this project, we propose a novel one-shot NAS method and introduce the concept of prioritized paths to NAS. The prioritized path refers to the architecture candidates exhibiting superior performance during searching. Distilling knowledge from prioritized paths allows boosting the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final discovered paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent MobileNetV3 (Google, ICCV’19) and EfficientNet (Google, ICML’19) families under aligned settings.
*[figure: Cream framework illustration]*
    From 47dce8c283a77e2696aa5fe04fd247d92a1d66ed Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 09:26:19 +0800 Subject: [PATCH 42/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 4621d783b3..c7cbec25dd 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,6 +1,3 @@ -# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search - - # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search This is an official implementation for our Cream NAS work presented in NeurIPS'20. From 5231d3b151942d27e4f717e786f76ae616ff3a51 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 09:29:34 +0800 Subject: [PATCH 43/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index c7cbec25dd..93685b1c53 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,12 +1,11 @@ # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search -This is an official implementation for our Cream NAS work presented in NeurIPS'20. **[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent MobileNetV3 (Google, ICCV’19) and EfficientNet (Google, ICML’19) families under aligned settings.
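Picking a teacher from that board is the job of the meta matching network (`forward_meta` in the supernet, `_select_teacher` in the trainer diff above): every stored path is scored by how well its cached soft logits match the current model's prediction on a cached input slice. A schematic version, with board entries simplified to `(arch, cached_input, cached_soft_logits)` triples and the supernet assumed to expose a `forward_meta` head as in the code above:

```python
import torch
import torch.nn.functional as F

def select_teacher(model, board_entries):
    best_score, best_arch = None, None
    for arch, cached_input, cached_soft_logits in board_entries:
        current = F.softmax(model(cached_input), dim=1)
        # forward_meta maps the logit difference to a single matching score
        score = model.forward_meta(current - cached_soft_logits)
        if best_score is None or score > best_score:
            best_score, best_arch = score, arch
    # the trainer squashes the raw score into (0, 1) with sigmoid(-score)
    meta_value = torch.sigmoid(-best_score)
    return meta_value, best_arch
```

In the `pick_method == 'top1'` branch this scoring is skipped entirely: the best-accuracy path is used with a fixed `meta_value` of 0.5.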
    - +
    ## Introduction From ce698c3491a45217ee6cf875817837750b499f88 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 09:35:24 +0800 Subject: [PATCH 44/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 93685b1c53..ecc457da18 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -8,13 +8,6 @@ In this work, we present a simple yet effective architecture distillation method
    -## Introduction -Neural Architecture Search (NAS) is an exciting field which facilitates the automatic design of deep networks. In this project, we propose a novel one-shot NAS method and introduce the concept of prioritized paths to NAS. The prioritized path refers to the architecture candidates exhibiting superior performance during searching. Distilling knowledge from prioritized paths allows boosting the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final discovered paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent MobileNetV3 (Google, ICCV’19) and EfficientNet (Google, ICML’19) families under aligned settings. - -
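The matching network itself is fitted with the second-order scheme in `_run_update` and `_calculate_2nd_gradient` earlier in this patch: the student takes one simulated SGD step on the distillation loss, and the validation loss of that virtually updated student is differentiated back into the matching parameters. The trainer routes this through `grad_outputs`; the toy below shows the same pattern end to end on hand-made tensors, purely as an illustration (note the trainer's `_simulate_sgd_update` keeps the original code's `g * lr + w` form, while the conventional descent step `w - lr * g` is used here):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)

# stand-ins: one student weight matrix and one scalar "meta" parameter
w = torch.randn(3, 5, requires_grad=True)
meta = torch.randn((), requires_grad=True)
x_train, y_train = torch.randn(4, 5), torch.randint(0, 3, (4,))
x_val, y_val = torch.randn(4, 5), torch.randint(0, 3, (4,))
lr = 0.1

# distillation-style training loss, weighted by a value derived from the meta parameter
meta_value = torch.sigmoid(meta)
train_loss = meta_value * F.cross_entropy(x_train @ w.t(), y_train)

# 1st-order gradient w.r.t. the student, kept in the graph (create_graph=True)
(g_w,) = torch.autograd.grad(train_loss, (w,), create_graph=True)

# simulated SGD step: the updated weight is now a function of both w and meta
w_updated = w - lr * g_w

# validation loss of the virtually updated student
val_loss = F.cross_entropy(x_val @ w_updated.t(), y_val)

# 2nd-order gradient: how the validation loss moves with the meta parameter
(g_meta,) = torch.autograd.grad(val_loss, (meta,))
print(g_meta)
```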
    - Cream
    - -
    ## Reproduction Results Top-1 Accuracy on ImageNet. The top-1 accuracy of Cream search algorithm surpasses MobileNetV3 and EfficientNet-B0/B1 on ImageNet. From 0d63ceb00e5be391f11f3bcebef49309e0c3e207 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 10:40:35 +0800 Subject: [PATCH 45/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index ecc457da18..a70bb442e1 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -2,14 +2,14 @@ **[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    -In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent MobileNetV3 (Google, ICCV’19) and EfficientNet (Google, ICML’19) families under aligned settings. +In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent [MobileNetV3](https://arxiv.org/abs/1905.02244) and [EfficientNet](https://arxiv.org/abs/1905.11946) families under aligned settings.
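Complexity enters both the board admission rule and the FLOPs window enforced during search (`FLOPS_MINIMUM`/`FLOPS_MAXIMUM` in the config). Per-candidate FLOPs come from a pre-computed lookup table, as in `_get_cand_flops` and `FlopsEst` in the diffs above; a minimal sketch with a hand-made table (the numbers are illustrative only):

```python
def candidate_flops(arch, flops_dict, flops_fixed):
    """Sum the table entries for every chosen op; -1 marks a skipped layer."""
    total = flops_fixed  # stem/head cost shared by every candidate
    for stage_id, stage in enumerate(arch):
        for layer_id, choice in enumerate(stage):
            if choice == -1:
                continue
            total += flops_dict[stage_id][layer_id][choice]
    return total

# toy table: flops_dict[stage][layer][choice]
toy_table = {
    0: {0: {0: 5.0, 1: 7.5}},
    1: {0: {0: 12.0, 1: 20.0}},
}
print(candidate_flops([[1], [0]], toy_table, flops_fixed=3.0))  # 3.0 + 7.5 + 12.0 = 22.5
```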
    -## Reproduction Results +## Reproduced Results Top-1 Accuracy on ImageNet. The top-1 accuracy of Cream search algorithm surpasses MobileNetV3 and EfficientNet-B0/B1 on ImageNet. The training with 16 Gpus is a little bit superior than 8 Gpus, as below. From 274fb23de58ee9cc07a376b270a33ec0ef7c416f Mon Sep 17 00:00:00 2001 From: mapleam Date: Mon, 23 Nov 2020 22:08:34 +0800 Subject: [PATCH 46/62] version 3.0 --- docs/en_US/NAS/Cream.md | 233 ++++++------- examples/nas/cream/configs/retrain.yaml | 52 +++ examples/nas/cream/configs/test.yaml | 37 ++ examples/nas/cream/{ => configs}/train.yaml | 2 +- examples/nas/cream/lib/config.py | 1 + examples/nas/cream/lib/core/retrain.py | 135 ++++++++ examples/nas/cream/lib/core/test.py | 87 +++++ .../lib/models/builders/build_supernet.py | 58 ++-- .../cream/lib/models/structures/supernet.py | 16 +- examples/nas/cream/lib/utils/flops_table.py | 34 +- examples/nas/cream/retrain.py | 318 ++++++++++++++++++ examples/nas/cream/run.sh | 1 - examples/nas/cream/test.py | 158 +++++++++ examples/nas/cream/train.py | 9 +- .../pynni/nni/nas/pytorch/cream/trainer.py | 24 +- 15 files changed, 964 insertions(+), 201 deletions(-) create mode 100644 examples/nas/cream/configs/retrain.yaml create mode 100644 examples/nas/cream/configs/test.yaml rename examples/nas/cream/{ => configs}/train.yaml (97%) create mode 100644 examples/nas/cream/lib/core/retrain.py create mode 100644 examples/nas/cream/lib/core/test.py create mode 100644 examples/nas/cream/retrain.py delete mode 100644 examples/nas/cream/run.sh create mode 100644 examples/nas/cream/test.py diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 8603f9deec..3dc2171a76 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,116 +1,117 @@ -# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search - -## Introduction -One-shot weight sharing methods have recently drawn great attention in neural architecture search due to high efficiency and competitive performance. However, weight sharing across models has an inherent deficiency, i.e., insufficient training of subnetworks in the hypernetwork. To alleviate this problem, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. We directly select the most promising one from the prioritized paths as the final architecture, without using other complex search methods, such as reinforcement learning or evolution algorithms. The experiments on ImageNet verify such path distillation method can improve the convergence ratio and performance of the hypernetwork, as well as boosting the training of subnetworks. The discovered architectures achieve superior performance compared to the recent MobileNetV3 and EfficientNet families under aligned settings. Moreover, the experiments on object detection and more challenging search space show the generality and robustness of the proposed method. For more details, please refer to the paper (coming soon). - -
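The file list above shows this revision moving all run parameters into `./configs/{train,retrain,test}.yaml`, with defaults declared in `lib/config.py`. Assuming that module follows the usual yacs `CfgNode` pattern (its full contents are not shown here), loading a run configuration looks roughly like this pruned sketch:

```python
from yacs.config import CfgNode as CN

# a heavily pruned stand-in for lib/config.py's defaults
cfg = CN()
cfg.MODEL = 'Supernet_Training'
cfg.SUPERNET = CN()
cfg.SUPERNET.FLOPS_MINIMUM = 0
cfg.SUPERNET.FLOPS_MAXIMUM = 600

def load_cfg(yaml_path):
    run_cfg = cfg.clone()
    run_cfg.merge_from_file(yaml_path)  # e.g. './configs/train.yaml'
    run_cfg.freeze()
    return run_cfg
```

`merge_from_file` only accepts keys that already exist in the defaults, so the real `lib/config.py` has to declare the full key set used by the YAML files in this patch.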
    - Cream
    - -
    - -## Reproduction Results -Top-1 Accuracy on ImageNet. The top-1 accuracy of Cream search algorithm surpasses MobileNetV3 and EfficientNet-B0/B1 on ImageNet. -The training with 16 Gpus is a little bit superior than 8 Gpus, as below. - -| Model (M Flops) | 8Gpus | 16Gpus | -| ---- |:-------------:| :-----:| -| 14M | 59.3 | 59.6 | -| 42M | 65.8 | 66.5 | -| 114M | 72.1 | 72.8 | -| 285M | 76.7 | 77.6 | -| 470M | 78.9 | 79.2 | -| 600M | 79.4 | 80.0 | - -
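The top-1 numbers in these tables are plain top-k precision, averaged across GPUs by an all-reduce; the corresponding helpers are added in this patch as `accuracy` and `reduce_tensor` (in the trainer's `utils.py` and in `lib/utils/util.py`). A condensed sketch of both:

```python
import torch
import torch.distributed as dist

def accuracy(output, target, topk=(1,)):
    """Precision@k in percent for a batch of logits."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum() * 100.0 / batch_size for k in topk]

def reduce_tensor(tensor, world_size):
    """Average a metric over all ranks (requires torch.distributed to be initialized)."""
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    return rt / world_size
```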
    - - -
    - - -## Requirements -* python >= 3.6 -* torch >= 1.2 -* torchscope -* apex - -## Examples - -[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) - -Please run the following scripts in the example folder. - -## Data Preparation - -You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: - -Put the imagenet data in `./data`. It should be like following: - -``` -./data/imagenet/train -./data/imagenet/val -... -``` - -## Quick Start - -### I. Search - -First, build environments for searching. - -``` -pip install -r ./requirements.txt -``` - -To search for an architecture, you need to configure the parameters `flops_minimum` and `flops_maximum` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./run.sh` - -``` ---flops_minimum 0 # Minimum Flops of Architecture ---flops_maximum 600 # Maximum Flops of Architecture -``` - -For example, if you expect to search an architecture with model flops <= 200M, please set the `flops_minimum` and `flops_maximum` to be `0` and `200`. - -After you specify the flops of the architectures you would like to search, you can search an architecture now by running: - -``` -sh ./run.sh -``` - -The searched architectures need to be retrained and obtain the final model. The final model is saved in `.pth.tar` format. Retraining code will be released soon. - -### II. Test - -To test our trained of models, you need to use `model_selection` in `./test.sh` to specify which model to test. - -``` ---model_selection 42 # test 42m model ---model_selection 470 # test 470m model -...... -``` - -After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. - -``` ---resume './data/ckpts/42.pth.tar' ---resume './data/ckpts/470.pth.tar' -...... -``` - -We provide 14M/42M/114M/285M/470M/600M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). - -After downloading the pretrained models and adding `--model_selection` and `--resume` in './test.sh', you need to use the following command to test the model. - -``` -sh ./test.sh -``` - -The test result will be saved in `./retrain`. You can configure the `--output` in `./test.sh` to specify a path for it. - -```eval_rst -.. autoclass:: nni.nas.pytorch.cream.CreamSupernetTrainer - :members: - -.. autoclass:: nni.nas.pytorch.cdarts.CreamSupernetTrainingMutator - :members: - -``` +# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search + +**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    + +In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent [MobileNetV3](https://arxiv.org/abs/1905.02244) and [EfficientNet](https://arxiv.org/abs/1905.11946) families under aligned settings. + +
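In the NNI port, the supernet's candidate operations are wrapped in `mutables.LayerChoice` (see the `build_supernet.py` diff at the end of this patch) and a `RandomMutator` samples one path per step; the Cream trainer then overwrites the sampled cache when it needs to run a teacher path. A toy end-to-end sketch of that mechanism, assuming the same NNI one-shot API that `train.py` imports:

```python
import torch
import torch.nn as nn
from nni.nas.pytorch import mutables
from nni.nas.pytorch.random import RandomMutator

class ToySupernet(nn.Module):
    def __init__(self):
        super().__init__()
        # one choice block, mirroring how build_supernet.py wraps its candidate ops
        self.block = mutables.LayerChoice([
            nn.Conv2d(3, 8, 3, padding=1),
            nn.Conv2d(3, 8, 5, padding=2),
        ])
        self.head = nn.Linear(8, 10)

    def forward(self, x):
        x = self.block(x)
        return self.head(x.mean(dim=(2, 3)))

model = ToySupernet()
mutator = RandomMutator(model)
mutator.reset()                   # sample one subnetwork
student_arch = mutator._cache     # the per-choice decisions the Cream trainer caches and replaces
output = model(torch.randn(2, 3, 32, 32))
```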
    + +
    + + +## Reproduced Results +Top-1 Accuracy on ImageNet. The top-1 accuracy of Cream search algorithm surpasses MobileNetV3 and EfficientNet-B0/B1 on ImageNet. +The training with 16 Gpus is a little bit superior than 8 Gpus, as below. + +| Model (M Flops) | 8Gpus | 16Gpus | +| ---- |:-------------:| :-----:| +| 14M | 59.3 | 59.6 | +| 42M | 65.8 | 66.5 | +| 114M | 72.1 | 72.8 | +| 287M | 76.7 | 77.6 | +| 481M | 78.9 | 79.2 | +| 604M | 79.4 | 80.0 | + + + + +
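The test script in this example evaluates the EMA copy of the weights (`NET.EMA` in the configs, timm's `ModelEma` in the original `test.py`). A minimal sketch of that kind of weight averaging in plain PyTorch (class name is illustrative):

```python
import copy
import torch

class SimpleEma:
    """Track an exponential moving average of a model's parameters and buffers."""

    def __init__(self, model, decay=0.9998):
        self.ema = copy.deepcopy(model).eval()
        self.decay = decay
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        for ema_t, t in zip(self.ema.state_dict().values(), model.state_dict().values()):
            if ema_t.dtype.is_floating_point:
                ema_t.mul_(self.decay).add_(t.detach(), alpha=1.0 - self.decay)
            else:
                ema_t.copy_(t)
```

At test time only the EMA weights are validated, which is why the original `test.py` passes `model_ema.ema` to `validate`.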
*[figures: top-1 accuracy comparison plots]*
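Every stage in the quick start below is launched with `torch.distributed.launch`, which supplies `--local_rank` to the script; inside the entry scripts the corresponding boilerplate is roughly the following (a stripped-down, illustrative stand-in, not the actual `train.py`):

```python
import argparse
import torch
import torch.distributed as dist

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=0)  # filled in by the launcher
    args = parser.parse_args()

    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl', init_method='env://')

    model = torch.nn.Linear(8, 2).cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])

    if args.local_rank == 0:
        print('world size:', dist.get_world_size())

if __name__ == '__main__':
    main()
```

Launched as `python -m torch.distributed.launch --nproc_per_node=N script.py`, each process handles one GPU and rank 0 does the logging, matching the commands in the sections that follow.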
    + +## Examples + +[Example code](https://github.com/microsoft/nni/tree/master/examples/nas/cream) + +Please run the following scripts in the example folder. + +## Data Preparation + +You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: + +Put the imagenet data in `./data`. It should be like following: + +``` +./data/imagenet/train +./data/imagenet/val +... +``` + +## Quick Start + +### I. Search + +First, build environments for searching. + +``` +pip install -r ./requirements +``` + +To search for an architecture, you need to configure the parameters `FLOPS_MINIMUM` and `FLOPS_MAXIMUM` to specify the desired model flops, such as [0,600]MB flops. You can specify the flops interval by changing these two parameters in `./configs/train.yaml` + +``` +FLOPS_MINIMUM: 0 # Minimum Flops of Architecture +FLOPS_MAXIMUM: 600 # Maximum Flops of Architecture +``` + +For example, if you expect to search an architecture with model flops <= 200M, please set the `FLOPS_MINIMUM` and `FLOPS_MAXIMUM` to be `0` and `200`. + +After you specify the flops of the architectures you would like to search, you can search an architecture now by running: + +``` +python -m torch.distributed.launch --nproc_per_node=8 ./train.py --cfg ./configs/train.yaml +``` + +The searched architectures need to be retrained and obtain the final model. The final model is saved in `.pth.tar` format. Retraining code will be released soon. + +### II. Retrain + +To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,42,112,287,481,604], which stands for different Flops(MB). +```buildoutcfg +MODEL_SELECTION: 42 # Retrain 42m model +MODEL_SELECTION: 481 # Retrain 481m model +...... +``` + + + +After adding `MODEL_SELECTION` in `./configs/retrain.yaml`, you need to use the following command to train the model. +```buildoutcfg +python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./configs/retrain.yaml +``` + +### II. Test + +To test our trained of models, you need to use `MODEL_SELECTION` in `./configs/test.yaml` to specify which model to test. + +``` +MODEL_SELECTION: 42 # test 42m model +MODEL_SELECTION: 481 # test 470m model +...... +``` + +After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. + +``` +RESUME_PATH: './42.pth.tar' +RESUME_PATH: './481.pth.tar' +...... +``` + +We provide 14M/42M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). + +After downloading the pretrained models and adding `MODEL_SELECTION` and `RESUME_PATH` in './configs/test.yaml', you need to use the following command to test the model. 
+ +``` +python -m torch.distributed.launch --nproc_per_node=8 ./test.py --cfg ./configs/test.yaml +``` + + diff --git a/examples/nas/cream/configs/retrain.yaml b/examples/nas/cream/configs/retrain.yaml new file mode 100644 index 0000000000..f042bb1af2 --- /dev/null +++ b/examples/nas/cream/configs/retrain.yaml @@ -0,0 +1,52 @@ +AUTO_RESUME: False +DATA_DIR: './data/imagenet' +MODEL: '604m_retrain' +RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar' +SAVE_PATH: './' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 4 +NUM_GPU: 2 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'random' # Image resize interpolation type + BATCH_SIZE: 128 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.0 + SELECTION: 42 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9998 + +OPT: 'rmsproptf' +OPT_EPS: 1e-2 +MOMENTUM: 0.9 +DECAY_RATE: 0.1 + +SCHED: 'sgd' +LR_NOISE: None +LR_NOISE_PCT: 0.67 +LR_NOISE_STD: 1.0 +WARMUP_LR: 1e-4 +MIN_LR: 1e-5 +EPOCHS: 200 +START_EPOCH: None +DECAY_EPOCHS: 30.0 +WARMUP_EPOCHS: 3 +COOLDOWN_EPOCHS: 10 +PATIENCE_EPOCHS: 10 +LR: 1e-2 \ No newline at end of file diff --git a/examples/nas/cream/configs/test.yaml b/examples/nas/cream/configs/test.yaml new file mode 100644 index 0000000000..4bf568517f --- /dev/null +++ b/examples/nas/cream/configs/test.yaml @@ -0,0 +1,37 @@ +AUTO_RESUME: True +DATA_DIR: './data/imagenet' +MODEL: 'Childnet_Testing' +RESUME_PATH: './experiments/workspace/ckps/42.pth.tar' +SAVE_PATH: './' +SEED: 42 +LOG_INTERVAL: 50 +RECOVERY_INTERVAL: 0 +WORKERS: 4 +NUM_GPU: 2 +SAVE_IMAGES: False +AMP: False +OUTPUT: 'None' +EVAL_METRICS: 'prec1' +TTA: 0 +LOCAL_RANK: 0 + +DATASET: + NUM_CLASSES: 1000 + IMAGE_SIZE: 224 # image patch size + INTERPOLATION: 'bilinear' # Image resize interpolation type + BATCH_SIZE: 32 # batch size + NO_PREFECHTER: False + +NET: + GP: 'avg' + DROPOUT_RATE: 0.0 + SELECTION: 42 + + EMA: + USE: True + FORCE_CPU: False # force model ema to be tracked on CPU + DECAY: 0.9998 + +OPTIMIZER: + MOMENTUM: 0.9 + WEIGHT_DECAY: 1e-3 \ No newline at end of file diff --git a/examples/nas/cream/train.yaml b/examples/nas/cream/configs/train.yaml similarity index 97% rename from examples/nas/cream/train.yaml rename to examples/nas/cream/configs/train.yaml index 2545cd5959..85164e0eda 100644 --- a/examples/nas/cream/train.yaml +++ b/examples/nas/cream/configs/train.yaml @@ -2,7 +2,7 @@ AUTO_RESUME: False DATA_DIR: './data/imagenet' MODEL: 'Supernet_Training' RESUME_PATH: './experiments/workspace/train/resume.pth.tar' -SAVE_PATH: './checkpoints' +SAVE_PATH: './' SEED: 42 LOG_INTERVAL: 50 RECOVERY_INTERVAL: 0 diff --git a/examples/nas/cream/lib/config.py b/examples/nas/cream/lib/config.py index 8bd67b6ef6..a605f08dc7 100644 --- a/examples/nas/cream/lib/config.py +++ b/examples/nas/cream/lib/config.py @@ -30,6 +30,7 @@ __C.NUM_GPU = 1 __C.SAVE_IMAGES = False __C.AMP = False +__C.ACC_GAP = 5 __C.OUTPUT = 'output/path/' __C.EVAL_METRICS = 'prec1' __C.TTA = 0 # Test or inference time augmentation diff --git a/examples/nas/cream/lib/core/retrain.py b/examples/nas/cream/lib/core/retrain.py new file mode 100644 index 0000000000..7468db2bb5 --- /dev/null +++ b/examples/nas/cream/lib/core/retrain.py @@ -0,0 +1,135 @@ +import os +import time +import torch +import torchvision + +from collections import OrderedDict + +from lib.utils.util import AverageMeter, 
accuracy, reduce_tensor + +def train_epoch( + epoch, model, loader, optimizer, loss_fn, cfg, + lr_scheduler=None, saver=None, output_dir='', use_amp=False, + model_ema=None, logger=None, writer=None, local_rank=0): + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + num_updates = epoch * len(loader) + optimizer.zero_grad() + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + + input = input.cuda() + target = target.cuda() + output = model(input) + + loss = loss_fn(output, target) + + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + if model_ema is not None: + model_ema.update(model) + num_updates += 1 + + batch_time_m.update(time.time() - end) + if last_batch or batch_idx % cfg.LOG_INTERVAL == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if local_rank == 0: + logger.info( + 'Train: {} [{:>4d}/{}] ' + 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e}' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, + len(loader), + loss=losses_m, + top1=prec1_m, + top5=prec5_m, + batch_time=batch_time_m, + rate=input.size(0) * + cfg.NUM_GPU / + batch_time_m.val, + rate_avg=input.size(0) * + cfg.NUM_GPU / + batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + + writer.add_scalar( + 'Loss/train', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Accuracy/train', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + writer.add_scalar( + 'Learning_Rate', + optimizer.param_groups[0]['lr'], + epoch * len(loader) + batch_idx) + + if cfg.SAVE_IMAGES and output_dir: + torchvision.utils.save_image( + input, os.path.join( + output_dir, 'train-batch-%d.jpg' % + batch_idx), padding=0, normalize=True) + + if saver is not None and cfg.RECOVERY_INTERVAL and ( + last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0): + saver.save_recovery( + model, + optimizer, + cfg, + epoch, + model_ema=model_ema, + use_amp=use_amp, + batch_idx=batch_idx) + + if lr_scheduler is not None: + lr_scheduler.step_update( + num_updates=num_updates, + metric=losses_m.avg) + + end = time.time() + # end for + + if hasattr(optimizer, 'sync_lookahead'): + optimizer.sync_lookahead() + + return OrderedDict([('loss', losses_m.avg)]) diff --git a/examples/nas/cream/lib/core/test.py b/examples/nas/cream/lib/core/test.py new file mode 100644 index 0000000000..7ab69b57c0 --- /dev/null +++ b/examples/nas/cream/lib/core/test.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import time +import torch + +from collections import OrderedDict +from lib.utils.util import AverageMeter, accuracy, reduce_tensor + + +def validate(epoch, model, loader, loss_fn, cfg, log_suffix='', logger=None, writer=None, local_rank=0): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + prec1_m = AverageMeter() + prec5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + + output = model(input) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = cfg.TTA + if reduce_factor > 1: + output = output.unfold( + 0, + reduce_factor, + reduce_factor).mean( + dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + prec1, prec5 = accuracy(output, target, topk=(1, 5)) + + if cfg.NUM_GPU > 1: + reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) + prec1 = reduce_tensor(prec1, cfg.NUM_GPU) + prec5 = reduce_tensor(prec5, cfg.NUM_GPU) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + prec1_m.update(prec1.item(), output.size(0)) + prec5_m.update(prec5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if local_rank == 0 and (last_batch or batch_idx % cfg.LOG_INTERVAL == 0): + log_name = 'Test' + log_suffix + logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, + batch_time=batch_time_m, loss=losses_m, + top1=prec1_m, top5=prec5_m)) + + writer.add_scalar( + 'Loss' + log_suffix + '/vaild', + prec1_m.avg, + epoch * len(loader) + batch_idx) + writer.add_scalar( + 'Accuracy' + + log_suffix + + '/vaild', + prec1_m.avg, + epoch * + len(loader) + + batch_idx) + + metrics = OrderedDict( + [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) + + return metrics diff --git a/examples/nas/cream/lib/models/builders/build_supernet.py b/examples/nas/cream/lib/models/builders/build_supernet.py index 0164174af0..37d9c575c8 100644 --- a/examples/nas/cream/lib/models/builders/build_supernet.py +++ b/examples/nas/cream/lib/models/builders/build_supernet.py @@ -5,6 +5,7 @@ from timm.models.efficientnet_blocks import * +from nni.nas.pytorch import mutables class SuperNetBuilder: """ Build Trunk Blocks @@ -122,25 +123,22 @@ def __call__(self, in_chs, model_block_args): List of block stacks (each stack wrapped in nn.Sequential) """ if self.verbose: - self.logger.info( - 'Building model trunk with %d stages...' % - len(model_block_args)) + logging.info('Building model trunk with %d stages...' 
% len(model_block_args)) self.in_chs = in_chs total_block_count = sum([len(x) for x in model_block_args]) total_block_idx = 0 current_stride = 2 current_dilation = 1 feature_idx = 0 - stages = nn.ModuleList() - # outer list of block_args defines the stacks ('stages' by some - # conventions) + stages = [] + # outer list of block_args defines the stacks ('stages' by some conventions) for stage_idx, stage_block_args in enumerate(model_block_args): last_stack = stage_idx == (len(model_block_args) - 1) if self.verbose: self.logger.info('Stack: {}'.format(stage_idx)) assert isinstance(stage_block_args, list) - blocks = nn.ModuleList() + # blocks = [] # each stack (stage) contains a list of block arguments for block_idx, block_args in enumerate(stage_block_args): last_block = block_idx == (len(stage_block_args) - 1) @@ -159,17 +157,14 @@ def __call__(self, in_chs, model_block_args): if next_output_stride > self.output_stride: next_dilation = current_dilation * block_args['stride'] block_args['stride'] = 1 - if self.verbose: - self.logger.info( - ' Converting stride to dilation to maintain output_stride=={}'.format( - self.output_stride)) else: current_stride = next_output_stride block_args['dilation'] = current_dilation if next_dilation != current_dilation: current_dilation = next_dilation - if stage_idx == 0 or stage_idx == 6: + + if stage_idx==0 or stage_idx==6: self.choice_num = 1 else: self.choice_num = len(self.choices) @@ -177,43 +172,30 @@ def __call__(self, in_chs, model_block_args): if self.dil_conv: self.choice_num += 2 - choice_blocks = nn.ModuleList() + choice_blocks = [] block_args_copy = deepcopy(block_args) if self.choice_num == 1: # create the block - block = self._make_block( - block_args, 0, total_block_idx, total_block_count) + block = self._make_block(block_args, 0, total_block_idx, total_block_count) choice_blocks.append(block) else: for choice_idx, choice in enumerate(self.choices): # create the block block_args = deepcopy(block_args_copy) - block_args = modify_block_args( - block_args, choice[0], choice[1]) - block = self._make_block( - block_args, choice_idx, total_block_idx, total_block_count) + block_args = modify_block_args(block_args, choice[0], choice[1]) + block = self._make_block(block_args, choice_idx, total_block_idx, total_block_count) choice_blocks.append(block) if self.dil_conv: block_args = deepcopy(block_args_copy) block_args = modify_block_args(block_args, 3, 0) - block = self._make_block( - block_args, - self.choice_num - 2, - total_block_idx, - total_block_count, - resunit=self.resunit, - dil_conv=self.dil_conv) + block = self._make_block(block_args, self.choice_num - 2, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) choice_blocks.append(block) block_args = deepcopy(block_args_copy) block_args = modify_block_args(block_args, 5, 0) - block = self._make_block( - block_args, - self.choice_num - 1, - total_block_idx, - total_block_count, - resunit=self.resunit, - dil_conv=self.dil_conv) + block = self._make_block(block_args, self.choice_num - 1, total_block_idx, total_block_count, + resunit=self.resunit, dil_conv=self.dil_conv) choice_blocks.append(block) if self.resunit: @@ -222,9 +204,11 @@ def __call__(self, in_chs, model_block_args): block.conv_dw.stride[0]) choice_blocks.append(block) - blocks.append(choice_blocks) - # incr global block idx (across all stacks) - total_block_idx += 1 + choice_block = mutables.LayerChoice(choice_blocks) + stages.append(choice_block) + # create the block + # block = 
self._make_block(block_args, total_block_idx, total_block_count) + total_block_idx += 1 # incr global block idx (across all stacks) - stages.append(blocks) + # stages.append(blocks) return stages diff --git a/examples/nas/cream/lib/models/structures/supernet.py b/examples/nas/cream/lib/models/structures/supernet.py index f8afe3ee6b..ea09377eb5 100644 --- a/examples/nas/cream/lib/models/structures/supernet.py +++ b/examples/nas/cream/lib/models/structures/supernet.py @@ -70,7 +70,8 @@ def __init__( resunit=resunit, dil_conv=dil_conv, logger=self.logger) - self.blocks = builder(self._in_chs, block_args) + blocks = builder(self._in_chs, block_args) + self.blocks = nn.Sequential(*blocks) self._in_chs = builder.in_chs # Head + Pooling @@ -102,23 +103,18 @@ def reset_classifier(self, num_classes, global_pool='avg'): self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None - def forward_features(self, x, architecture): + def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) - for layer, layer_arch in zip(self.blocks, architecture): - for blocks, arch in zip(layer, layer_arch): - if arch == -1: - continue - x = blocks[arch](x) - + x = self.blocks(x) x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) return x - def forward(self, x, architecture): - x = self.forward_features(x, architecture) + def forward(self, x): + x = self.forward_features(x) x = x.flatten(1) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) diff --git a/examples/nas/cream/lib/utils/flops_table.py b/examples/nas/cream/lib/utils/flops_table.py index 4c9e4e457d..3e03028d1c 100644 --- a/examples/nas/cream/lib/utils/flops_table.py +++ b/examples/nas/cream/lib/utils/flops_table.py @@ -36,17 +36,14 @@ def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'): self.flops_dict[block_id] = {} self.params_dict[block_id] = {} for module_id, module in enumerate(block): - self.flops_dict[block_id][module_id] = {} - self.params_dict[block_id][module_id] = {} - for choice_id, choice in enumerate(module): - flops, params = get_model_complexity_info(choice, tuple( - input.shape[1:]), as_strings=False, print_per_layer_stat=False) - # Flops(M) - self.flops_dict[block_id][module_id][choice_id] = flops / 1e6 - # Params(M) - self.params_dict[block_id][module_id][choice_id] = params / 1e6 - - input = choice(input) + flops, params = get_model_complexity_info(module, tuple( + input.shape[1:]), as_strings=False, print_per_layer_stat=False) + # Flops(M) + self.flops_dict[block_id][module_id] = flops / 1e6 + # Params(M) + self.params_dict[block_id][module_id] = params / 1e6 + + input = module(input) # conv_last flops, params = get_model_complexity_info(model.global_pool, tuple( @@ -66,18 +63,17 @@ def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'): def get_params(self, arch): params = 0 for block_id, block in enumerate(arch): - for module_id, choice in enumerate(block): - if choice == -1: - continue - params += self.params_dict[block_id][module_id][choice] + if block == -1: + continue + params += self.params_dict[block_id][block] return params + self.params_fixed # return flops (M) def get_flops(self, arch): flops = 0 for block_id, block in enumerate(arch): - for module_id, choice in enumerate(block): - if choice == -1: - continue - flops += self.flops_dict[block_id][module_id][choice] + if block is 'LayerChoice1' or block_id is 'LayerChoice23': + continue + for idx, choice in enumerate(arch[block]): + flops += 
self.flops_dict[block_id][idx] * (1 if choice else 0) return flops + self.flops_fixed diff --git a/examples/nas/cream/retrain.py b/examples/nas/cream/retrain.py new file mode 100644 index 0000000000..5b2187a9bd --- /dev/null +++ b/examples/nas/cream/retrain.py @@ -0,0 +1,318 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import numpy as np +import torch.nn as nn + +from torchscope import scope +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.optim import create_optimizer +from timm.models import resume_checkpoint +from timm.scheduler import create_scheduler +from timm.data import Dataset, create_loader +from timm.utils import ModelEma, update_summary +from timm.loss import LabelSmoothingCrossEntropy + +# import apex as distributed package +try: + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.core.retrain import train_epoch +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('nni.cream.childnet') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'retrain.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # retrain model selection + if cfg.NET.SELECTION == 470: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 42: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 285: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 600: + arch_list = [ + [0], [ + 3, 3, 2, 3, 3], [ + 3, 2, 3, 2, 3], [ + 3, 2, 3, 2, 3], [ + 3, 3, 2, 2, 3, 3], [ + 3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Retrain Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + 
arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + # initialize training parameters + eval_metric = cfg.EVAL_METRICS + best_metric, best_epoch, saver = None, None, None + + # initialize distributed parameters + distributed = cfg.NUM_GPU > 1 + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + 'Training on Process {} with {} GPUs.'.format( + args.local_rank, cfg.NUM_GPU)) + + # fix random seeds + torch.manual_seed(cfg.SEED) + torch.cuda.manual_seed_all(cfg.SEED) + np.random.seed(cfg.SEED) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # get parameters and FLOPs of model + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # create optimizer + optimizer = create_optimizer(cfg, model) + model = model.cuda() + + # optionally resume from a checkpoint + resume_state, resume_epoch = {}, None + if cfg.AUTO_RESUME: + resume_state, resume_epoch = resume_checkpoint(model, cfg.RESUME_PATH) + optimizer.load_state_dict(resume_state['optimizer']) + del resume_state + + model_ema = None + if cfg.NET.EMA.USE: + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH if cfg.AUTO_RESUME else None) + + if distributed: + if cfg.BATCHNORM.SYNC_BN: + try: + if HAS_APEX: + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm( + model) + if args.local_rank == 0: + logger.info( + 'Converted model to use Synchronized BatchNorm.') + except Exception as e: + if args.local_rank == 0: + logger.error( + 'Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1 with exception {}'.format(e)) + if HAS_APEX: + model = DDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + logger.info( + "Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.") + # can use device str in Torch >= 1.1 + model = DDP(model, device_ids=[args.local_rank]) + + # imagenet train dataset + train_dir = os.path.join(cfg.DATA_DIR, 'train') + if not os.path.exists(train_dir) and args.local_rank == 0: + logger.error('Training folder does not exist at: {}'.format(train_dir)) + exit(1) + dataset_train = Dataset(train_dir) + loader_train = create_loader( + dataset_train, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.BATCH_SIZE, + is_training=True, + color_jitter=cfg.AUGMENTATION.COLOR_JITTER, + auto_augment=cfg.AUGMENTATION.AA, + num_aug_splits=0, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + collate_fn=None, + pin_memory=cfg.DATASET.PIN_MEM, + interpolation='random', + re_mode=cfg.AUGMENTATION.RE_MODE, + re_prob=cfg.AUGMENTATION.RE_PROB + ) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + interpolation=cfg.DATASET.INTERPOLATION, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=cfg.WORKERS, + distributed=distributed, + pin_memory=cfg.DATASET.PIN_MEM + ) + + # whether to use label smoothing + if cfg.AUGMENTATION.SMOOTHING > 0.: + train_loss_fn = LabelSmoothingCrossEntropy( + smoothing=cfg.AUGMENTATION.SMOOTHING).cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + else: + train_loss_fn = nn.CrossEntropyLoss().cuda() + validate_loss_fn = train_loss_fn + + # create learning rate scheduler + lr_scheduler, num_epochs = create_scheduler(cfg, optimizer) + start_epoch = resume_epoch if resume_epoch is not None else 0 + if start_epoch > 0: + lr_scheduler.step(start_epoch) + if args.local_rank == 0: + logger.info('Scheduled epochs: {}'.format(num_epochs)) + + try: + best_record, best_ep = 0, 0 + for epoch in range(start_epoch, num_epochs): + if distributed: + loader_train.sampler.set_epoch(epoch) + + train_metrics = train_epoch( + epoch, + model, + loader_train, + optimizer, + train_loss_fn, + cfg, + lr_scheduler=lr_scheduler, + saver=saver, + output_dir=output_dir, + model_ema=model_ema, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + eval_metrics = validate( + epoch, + model, + loader_eval, + validate_loss_fn, + cfg, + logger=logger, + writer=writer, + local_rank=args.local_rank) + + if model_ema is not None and not cfg.NET.EMA.FORCE_CPU: + ema_eval_metrics = validate( + epoch, + model_ema.ema, + loader_eval, + validate_loss_fn, + cfg, + log_suffix='_EMA', + logger=logger, + writer=writer) + eval_metrics = ema_eval_metrics + + if lr_scheduler is not None: + lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) + + update_summary(epoch, train_metrics, eval_metrics, os.path.join( + output_dir, 'summary.csv'), write_header=best_metric is None) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint( + model, optimizer, cfg, + epoch=epoch, model_ema=model_ema, metric=save_metric) + + if best_record < 
eval_metrics[eval_metric]: + best_record = eval_metrics[eval_metric] + best_ep = epoch + + if args.local_rank == 0: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_record, best_ep)) + + except KeyboardInterrupt: + pass + + if best_metric is not None: + logger.info( + '*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/cream/run.sh b/examples/nas/cream/run.sh deleted file mode 100644 index 3bf9375ac3..0000000000 --- a/examples/nas/cream/run.sh +++ /dev/null @@ -1 +0,0 @@ -python -m torch.distributed.launch --nproc_per_node=8 tools/retrain.py --cfg ./train.yaml \ No newline at end of file diff --git a/examples/nas/cream/test.py b/examples/nas/cream/test.py new file mode 100644 index 0000000000..0c3fa183a6 --- /dev/null +++ b/examples/nas/cream/test.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Written by Hao Du and Houwen Peng +# email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com + +import os +import warnings +import datetime +import torch +import torch.nn as nn + +from torch.utils.tensorboard import SummaryWriter + +# import timm packages +from timm.utils import ModelEma +from timm.models import resume_checkpoint +from timm.data import Dataset, create_loader + +# import apex as distributed package +try: + from apex.parallel import convert_syncbn_model + from apex.parallel import DistributedDataParallel as DDP + HAS_APEX = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + HAS_APEX = False + +# import models and training functions +from lib.core.test import validate +from lib.models.structures.childnet import gen_childnet +from lib.utils.util import parse_config_args, get_logger, get_model_flops_params +from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def main(): + args, cfg = parse_config_args('child net testing') + + # resolve logging + output_dir = os.path.join(cfg.SAVE_PATH, + "{}-{}".format(datetime.date.today().strftime('%m%d'), + cfg.MODEL)) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if args.local_rank == 0: + logger = get_logger(os.path.join(output_dir, 'test.log')) + writer = SummaryWriter(os.path.join(output_dir, 'runs')) + else: + writer, logger = None, None + + # retrain model selection + if cfg.NET.SELECTION == 470: + arch_list = [ + [0], [ + 3, 4, 3, 1], [ + 3, 2, 3, 0], [ + 3, 3, 3, 1], [ + 3, 3, 3, 3], [ + 3, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 42: + arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 96 + elif cfg.NET.SELECTION == 14: + arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]] + cfg.DATASET.IMAGE_SIZE = 64 + elif cfg.NET.SELECTION == 112: + arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 160 + elif cfg.NET.SELECTION == 285: + arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == 600: + arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], + [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]] + cfg.DATASET.IMAGE_SIZE = 224 + else: + raise ValueError("Model Test Selection is not Supported!") + + # define childnet architecture from arch_list + stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25'] + choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25', + 'ir_r1_k5_s2_e4_c40_se0.25', + 
'ir_r1_k3_s2_e6_c80_se0.25', + 'ir_r1_k3_s1_e6_c96_se0.25', + 'ir_r1_k3_s2_e6_c192_se0.25'] + arch_def = [[stem[0]]] + [[choice_block_pool[idx] + for repeat_times in range(len(arch_list[idx + 1]))] + for idx in range(len(choice_block_pool))] + [[stem[1]]] + + # generate childnet + model = gen_childnet( + arch_list, + arch_def, + num_classes=cfg.DATASET.NUM_CLASSES, + drop_rate=cfg.NET.DROPOUT_RATE, + global_pool=cfg.NET.GP) + + if args.local_rank == 0: + macs, params = get_model_flops_params(model, input_size=( + 1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE)) + logger.info( + '[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params)) + + # initialize distributed parameters + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + if args.local_rank == 0: + logger.info( + "Training on Process {} with {} GPUs.".format( + args.local_rank, cfg.NUM_GPU)) + + # resume model from checkpoint + assert cfg.AUTO_RESUME is True and os.path.exists(cfg.RESUME_PATH) + _, __ = resume_checkpoint(model, cfg.RESUME_PATH) + + model = model.cuda() + + model_ema = None + if cfg.NET.EMA.USE: + # Important to create EMA model after cuda(), DP wrapper, and AMP but + # before SyncBN and DDP wrapper + model_ema = ModelEma( + model, + decay=cfg.NET.EMA.DECAY, + device='cpu' if cfg.NET.EMA.FORCE_CPU else '', + resume=cfg.RESUME_PATH) + + # imagenet validation dataset + eval_dir = os.path.join(cfg.DATA_DIR, 'val') + if not os.path.exists(eval_dir) and args.local_rank == 0: + logger.error( + 'Validation folder does not exist at: {}'.format(eval_dir)) + exit(1) + + dataset_eval = Dataset(eval_dir) + loader_eval = create_loader( + dataset_eval, + input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE), + batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE, + is_training=False, + num_workers=cfg.WORKERS, + distributed=True, + pin_memory=cfg.DATASET.PIN_MEM, + crop_pct=DEFAULT_CROP_PCT, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD + ) + + # only test accuracy of model-EMA + validate_loss_fn = nn.CrossEntropyLoss().cuda() + validate(0, model_ema.ema, loader_eval, validate_loss_fn, cfg, + log_suffix='_EMA', logger=logger, + writer=writer, local_rank=args.local_rank) + + +if __name__ == '__main__': + main() diff --git a/examples/nas/cream/train.py b/examples/nas/cream/train.py index 2a176bade8..3a515e136d 100644 --- a/examples/nas/cream/train.py +++ b/examples/nas/cream/train.py @@ -10,10 +10,7 @@ import numpy as np import torch.nn as nn -import _init_paths - # import timm packages -from timm.utils import CheckpointSaver, update_summary from timm.loss import LabelSmoothingCrossEntropy from timm.data import Dataset, create_loader from timm.models import resume_checkpoint @@ -30,8 +27,6 @@ # import models and training functions from lib.utils.flops_table import FlopsEst from lib.models.structures.supernet import gen_supernet -from lib.models.PrioritizedBoard import PrioritizedBoard -from lib.models.MetaMatchingNetwork import MetaMatchingNetwork from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN from lib.utils.util import parse_config_args, get_logger, \ create_optimizer_supernet, create_supernet_scheduler @@ -85,7 +80,7 @@ def main(): logger=logger) # number of choice blocks in supernet - choice_num = len(model.blocks[1][0]) + choice_num = len(model.blocks[7]) if args.local_rank == 0: logger.info('Supernet created, param count: %d', ( sum([m.numel() for m in model.parameters()]))) 
@@ -200,7 +195,7 @@ def main(): trainer = CreamSupernetTrainer(model, train_loss_fn, validate_loss_fn, optimizer, num_epochs, loader_train, loader_eval, mutator=mutator, batch_size=cfg.DATASET.BATCH_SIZE, - log_frequency=cfg.LOG_FREQUENCY, + log_frequency=cfg.LOG_INTERVAL, meta_sta_epoch=cfg.SUPERNET.META_STA_EPOCH, update_iter=cfg.SUPERNET.UPDATE_ITER, slices=cfg.SUPERNET.SLICE, diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py index a4fbc1c106..2026de4db3 100644 --- a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py +++ b/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py @@ -95,7 +95,6 @@ def __init__(self, model, loss, val_loss, self.pick_method = pick_method self.pool_size = pool_size self.local_rank = local_rank - self.main_proc = (local_rank == 0) self.choice_num = choice_num self.sta_num = sta_num self.acc_gap = acc_gap @@ -104,6 +103,8 @@ def __init__(self, model, loss, val_loss, self.current_student_arch = None self.current_teacher_arch = None + self.main_proc = (local_rank == 0) + self.current_epoch = 0 self.prioritized_board = [] @@ -317,13 +318,14 @@ def _run_update(self, input, target, batch_idx): def _get_cand_flops(self, cand): flops = 0 for block_id, block in enumerate(cand): - for module_id, choice in enumerate(block): - if choice == -1: - continue - flops += self.flops_dict[block_id][module_id][choice] + if block is 'LayerChoice1' or block_id is 'LayerChoice23': + continue + for idx, choice in enumerate(cand[block]): + flops += self.flops_dict[block_id][idx] * (1 if choice else 0) return flops + self.flops_fixed def train_one_epoch(self, epoch): + self.current_epoch = epoch meters = AverageMeterGroup() for step, (input_data, target) in enumerate(self.train_loader): self.mutator.reset() @@ -337,9 +339,10 @@ def train_one_epoch(self, epoch): # update meta matching network self._run_update(input_data, target, step) - # select teacher architecture - meta_value, teacher_cand = self._select_teacher() - self.current_teacher_arch = teacher_cand + if self._board_size() > 0: + # select teacher architecture + meta_value, teacher_cand = self._select_teacher() + self.current_teacher_arch = teacher_cand # forward supernet if self._board_size() == 0 or epoch <= self.meta_sta_epoch: @@ -347,6 +350,7 @@ def train_one_epoch(self, epoch): output = self.model(input_data) loss = self.loss(output, target) + kd_loss, teacher_output, teacher_cand = None, None, None else: self._replace_mutator_cand(self.current_student_arch) output = self.model(input_data) @@ -374,10 +378,10 @@ def train_one_epoch(self, epoch): meters.update(metrics) # update prioritized board - self._update_prioritized_board(input_data, teacher_output, output, metrics['top1'], cand_flops) + self._update_prioritized_board(input_data, teacher_output, output, metrics['prec1'], cand_flops) if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): - self.logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs, + logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs, step + 1, len(self.train_loader), meters) if self.main_proc and self.num_epochs == epoch + 1: From c162f3916fa5c6bca4fa5812597232747e0e6dad Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 22:26:32 +0800 Subject: [PATCH 47/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en_US/NAS/Cream.md 
b/docs/en_US/NAS/Cream.md index 532aecbeb6..f4724be84f 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,6 +1,6 @@ # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search -**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    +**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (PWD: wqw6)]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent [MobileNetV3](https://arxiv.org/abs/1905.02244) and [EfficientNet](https://arxiv.org/abs/1905.11946) families under aligned settings. @@ -15,8 +15,8 @@ The training with 16 Gpus is a little bit superior than 8 Gpus, as below. | Model (M Flops) | 8Gpus | 16Gpus | | ---- |:-------------:| :-----:| -| 14M | 59.3 | 59.6 | -| 42M | 65.8 | 66.5 | +| 14M | 53.7 | 53.7 | +| 43M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 287M | 76.7 | 77.6 | | 481M | 78.9 | 79.2 | @@ -112,4 +112,4 @@ After downloading the pretrained models and adding `MODEL_SELECTION` and `RESUME ``` python -m torch.distributed.launch --nproc_per_node=8 ./test.py --cfg ./configs/test.yaml -``` \ No newline at end of file +``` From de8c261d02d0a8c5f62170a0a667a6a02d9a46ec Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Mon, 23 Nov 2020 22:31:32 +0800 Subject: [PATCH 48/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index f4724be84f..e5d13f3ee4 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,6 +1,6 @@ # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search -**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (PWD: wqw6)]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    +**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (PWD: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent [MobileNetV3](https://arxiv.org/abs/1905.02244) and [EfficientNet](https://arxiv.org/abs/1905.11946) families under aligned settings. @@ -76,7 +76,7 @@ The searched architectures need to be retrained and obtain the final model. The To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,42,112,287,481,604], which stands for different Flops(MB). ```buildoutcfg -MODEL_SELECTION: 42 # Retrain 42m model +MODEL_SELECTION: 43 # Retrain 43m model MODEL_SELECTION: 481 # Retrain 481m model ...... ``` @@ -93,7 +93,7 @@ python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./confi To test our trained of models, you need to use `MODEL_SELECTION` in `./configs/test.yaml` to specify which model to test. ``` -MODEL_SELECTION: 42 # test 42m model +MODEL_SELECTION: 43 # test 43m model MODEL_SELECTION: 481 # test 470m model ...... ``` @@ -101,12 +101,12 @@ MODEL_SELECTION: 481 # test 470m model After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. ``` -RESUME_PATH: './42.pth.tar' +RESUME_PATH: './43.pth.tar' RESUME_PATH: './481.pth.tar' ...... ``` -We provide 14M/42M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). +We provide 14M/43M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2) or [[Models-Baidu Disk (password: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) . After downloading the pretrained models and adding `MODEL_SELECTION` and `RESUME_PATH` in './configs/test.yaml', you need to use the following command to test the model. From ae457872dc9090811f0487a086d769aafcad704c Mon Sep 17 00:00:00 2001 From: lzuqer <252840273@qq.com> Date: Mon, 23 Nov 2020 22:43:02 +0800 Subject: [PATCH 49/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 532aecbeb6..658a210922 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -16,7 +16,7 @@ The training with 16 Gpus is a little bit superior than 8 Gpus, as below. | Model (M Flops) | 8Gpus | 16Gpus | | ---- |:-------------:| :-----:| | 14M | 59.3 | 59.6 | -| 42M | 65.8 | 66.5 | +| 43M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 287M | 76.7 | 77.6 | | 481M | 78.9 | 79.2 | @@ -74,9 +74,9 @@ The searched architectures need to be retrained and obtain the final model. The ### II. Retrain -To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. 
To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,42,112,287,481,604], which stands for different Flops(MB). +To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,43,112,287,481,604], which stands for different Flops(MB). ```buildoutcfg -MODEL_SELECTION: 42 # Retrain 42m model +MODEL_SELECTION: 43 # Retrain 43m model MODEL_SELECTION: 481 # Retrain 481m model ...... ``` @@ -93,7 +93,7 @@ python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./confi To test our trained of models, you need to use `MODEL_SELECTION` in `./configs/test.yaml` to specify which model to test. ``` -MODEL_SELECTION: 42 # test 42m model +MODEL_SELECTION: 43 # test 43m model MODEL_SELECTION: 481 # test 470m model ...... ``` @@ -101,15 +101,15 @@ MODEL_SELECTION: 481 # test 470m model After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. ``` -RESUME_PATH: './42.pth.tar' +RESUME_PATH: './43.pth.tar' RESUME_PATH: './481.pth.tar' ...... ``` -We provide 14M/42M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). +We provide 14M/43M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). After downloading the pretrained models and adding `MODEL_SELECTION` and `RESUME_PATH` in './configs/test.yaml', you need to use the following command to test the model. ``` python -m torch.distributed.launch --nproc_per_node=8 ./test.py --cfg ./configs/test.yaml -``` \ No newline at end of file +``` From 43101c1bda48cab428a42494187e55acd09608db Mon Sep 17 00:00:00 2001 From: lzuqer <252840273@qq.com> Date: Mon, 23 Nov 2020 22:44:26 +0800 Subject: [PATCH 50/62] Update retrain.py --- examples/nas/cream/retrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nas/cream/retrain.py b/examples/nas/cream/retrain.py index 5b2187a9bd..d95a2b93bd 100644 --- a/examples/nas/cream/retrain.py +++ b/examples/nas/cream/retrain.py @@ -65,7 +65,7 @@ def main(): 3, 3, 3, 3], [ 3, 3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 42: + elif cfg.NET.SELECTION == 43: arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 96 elif cfg.NET.SELECTION == 14: From 36ddeafd5d03df147c1eb1e7aa88fdfa85528918 Mon Sep 17 00:00:00 2001 From: lzuqer <252840273@qq.com> Date: Mon, 23 Nov 2020 22:46:22 +0800 Subject: [PATCH 51/62] Update test.py --- examples/nas/cream/test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/nas/cream/test.py b/examples/nas/cream/test.py index 0c3fa183a6..67ee822853 100644 --- a/examples/nas/cream/test.py +++ b/examples/nas/cream/test.py @@ -49,7 +49,7 @@ def main(): writer, logger = None, None # retrain model selection - if cfg.NET.SELECTION == 470: + if cfg.NET.SELECTION == 481: arch_list = [ [0], [ 3, 4, 3, 1], [ @@ -58,7 +58,7 @@ def main(): 3, 3, 3, 3], [ 3, 3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 42: + elif cfg.NET.SELECTION == 43: arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 96 elif cfg.NET.SELECTION == 14: @@ -67,10 +67,10 @@ def main(): elif cfg.NET.SELECTION == 112: arch_list 
= [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 160 - elif cfg.NET.SELECTION == 285: + elif cfg.NET.SELECTION == 287: arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 600: + elif cfg.NET.SELECTION == 604: arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3], [3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 From 97451af58dd530c2f58e9857012dec80d96f0330 Mon Sep 17 00:00:00 2001 From: lzuqer <252840273@qq.com> Date: Mon, 23 Nov 2020 22:47:09 +0800 Subject: [PATCH 52/62] Update retrain.py --- examples/nas/cream/retrain.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/nas/cream/retrain.py b/examples/nas/cream/retrain.py index d95a2b93bd..7c64b23b12 100644 --- a/examples/nas/cream/retrain.py +++ b/examples/nas/cream/retrain.py @@ -56,7 +56,7 @@ def main(): writer, logger = None, None # retrain model selection - if cfg.NET.SELECTION == 470: + if cfg.NET.SELECTION == 481: arch_list = [ [0], [ 3, 4, 3, 1], [ @@ -74,10 +74,10 @@ def main(): elif cfg.NET.SELECTION == 112: arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 160 - elif cfg.NET.SELECTION == 285: + elif cfg.NET.SELECTION == 287: arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 - elif cfg.NET.SELECTION == 600: + elif cfg.NET.SELECTION == 604: arch_list = [ [0], [ 3, 3, 2, 3, 3], [ From 8d24833d95a34ecc41c1f0566e02f91673eeab24 Mon Sep 17 00:00:00 2001 From: mapleam Date: Mon, 23 Nov 2020 23:22:07 +0800 Subject: [PATCH 53/62] version 4.0 --- docs/en_US/NAS/Cream.md | 18 ++++++++---------- examples/nas/cream/lib/config.py | 1 + examples/nas/cream/retrain.py | 3 +++ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 3dc2171a76..73b9e869b3 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -1,6 +1,6 @@ # Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search -**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk]](https://pan.baidu.com/s/4hymmwni) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    +**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (PWD: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)**
    In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop. The discovered architectures achieve superior performance compared to the recent [MobileNetV3](https://arxiv.org/abs/1905.02244) and [EfficientNet](https://arxiv.org/abs/1905.11946) families under aligned settings. @@ -15,8 +15,8 @@ The training with 16 Gpus is a little bit superior than 8 Gpus, as below. | Model (M Flops) | 8Gpus | 16Gpus | | ---- |:-------------:| :-----:| -| 14M | 59.3 | 59.6 | -| 42M | 65.8 | 66.5 | +| 14M | 53.7 | 53.7 | +| 43M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 287M | 76.7 | 77.6 | | 481M | 78.9 | 79.2 | @@ -74,9 +74,9 @@ The searched architectures need to be retrained and obtain the final model. The ### II. Retrain -To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,42,112,287,481,604], which stands for different Flops(MB). +To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,43,112,287,481,604], which stands for different Flops(MB). ```buildoutcfg -MODEL_SELECTION: 42 # Retrain 42m model +MODEL_SELECTION: 43 # Retrain 43m model MODEL_SELECTION: 481 # Retrain 481m model ...... ``` @@ -93,7 +93,7 @@ python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./confi To test our trained of models, you need to use `MODEL_SELECTION` in `./configs/test.yaml` to specify which model to test. ``` -MODEL_SELECTION: 42 # test 42m model +MODEL_SELECTION: 43 # test 43m model MODEL_SELECTION: 481 # test 470m model ...... ``` @@ -101,17 +101,15 @@ MODEL_SELECTION: 481 # test 470m model After specifying the flops of the model, you need to write the path to the resume model in `./test.sh`. ``` -RESUME_PATH: './42.pth.tar' +RESUME_PATH: './43.pth.tar' RESUME_PATH: './481.pth.tar' ...... ``` -We provide 14M/42M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2). +We provide 14M/43M/114M/287M/481M/604M pretrained models in [google drive](https://drive.google.com/drive/folders/1CQjyBryZ4F20Rutj7coF8HWFcedApUn2) or [[Models-Baidu Disk (password: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) . After downloading the pretrained models and adding `MODEL_SELECTION` and `RESUME_PATH` in './configs/test.yaml', you need to use the following command to test the model. 
``` python -m torch.distributed.launch --nproc_per_node=8 ./test.py --cfg ./configs/test.yaml ``` - - diff --git a/examples/nas/cream/lib/config.py b/examples/nas/cream/lib/config.py index a605f08dc7..fd50b4a9a5 100644 --- a/examples/nas/cream/lib/config.py +++ b/examples/nas/cream/lib/config.py @@ -53,6 +53,7 @@ __C.NET.SELECTION = 14 __C.NET.GP = 'avg' # type of global pool ["avg", "max", "avgmax", "avgmaxc"] __C.NET.DROPOUT_RATE = 0.0 # dropout rate +__C.NET.INPUT_ARCH = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] # model ema parameters __C.NET.EMA = CN() diff --git a/examples/nas/cream/retrain.py b/examples/nas/cream/retrain.py index 5b2187a9bd..f33ff97de3 100644 --- a/examples/nas/cream/retrain.py +++ b/examples/nas/cream/retrain.py @@ -86,6 +86,9 @@ def main(): 3, 3, 2, 2, 3, 3], [ 3, 3, 2, 3, 3, 3], [0]] cfg.DATASET.IMAGE_SIZE = 224 + elif cfg.NET.SELECTION == -1: + arch_list = cfg.NET.INPUT_ARCH + cfg.DATASET.IMAGE_SIZE = 224 else: raise ValueError("Model Retrain Selection is not Supported!") From cce57e5318f921e679450124d7242655177da93a Mon Sep 17 00:00:00 2001 From: mapleam Date: Mon, 23 Nov 2020 23:29:52 +0800 Subject: [PATCH 54/62] version 4.0 --- docs/en_US/NAS/Cream.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 73b9e869b3..f88b58d8fa 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -81,7 +81,12 @@ MODEL_SELECTION: 481 # Retrain 481m model ...... ``` - +To train random architectures, you need specify `MODEL_SELECTION` to `-1` and configure the parameter `INPUT_ARCH`: +```buildoutcfg +MODEL_SELECTION: -1 # Train random architectures +INPUT_ARCH: [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] # Random Architectures +...... +``` After adding `MODEL_SELECTION` in `./configs/retrain.yaml`, you need to use the following command to train the model. ```buildoutcfg From 0f8f8bf3d8d3752f2f4e64466b6de3661fcd09c7 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Tue, 24 Nov 2020 11:11:29 +0800 Subject: [PATCH 55/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index f88b58d8fa..d59bdfaef8 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -15,7 +15,7 @@ The training with 16 Gpus is a little bit superior than 8 Gpus, as below. | Model (M Flops) | 8Gpus | 16Gpus | | ---- |:-------------:| :-----:| -| 14M | 53.7 | 53.7 | +| 14M | 53.7 | 53.8 | | 43M | 65.8 | 66.5 | | 114M | 72.1 | 72.8 | | 287M | 76.7 | 77.6 | From 879bfeb1f5b871d4875c2f7aa2479a6db2f69cb1 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Tue, 24 Nov 2020 19:34:34 +0800 Subject: [PATCH 56/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index d59bdfaef8..9e34819b91 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -93,7 +93,7 @@ After adding `MODEL_SELECTION` in `./configs/retrain.yaml`, you need to use the python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./configs/retrain.yaml ``` -### II. Test +### III. Test To test our trained of models, you need to use `MODEL_SELECTION` in `./configs/test.yaml` to specify which model to test. 
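The `arch_list` → `arch_def` expansion in `retrain.py` above is easy to misread: each inner list of `arch_list` contributes only its length, i.e. how many times the corresponding choice-block string is repeated in that stage. Below is a standalone sketch of that expansion for the 43M model, reusing the `stem` and `choice_block_pool` strings from the diff; the final print loop is added purely for inspection.

```python
# Sketch of the arch_list -> arch_def expansion used in retrain.py above.
arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]]  # cfg.NET.SELECTION == 43

stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25']
choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25',
                     'ir_r1_k5_s2_e4_c40_se0.25',
                     'ir_r1_k3_s2_e6_c80_se0.25',
                     'ir_r1_k3_s1_e6_c96_se0.25',
                     'ir_r1_k3_s2_e6_c192_se0.25']

# Same comprehension as in retrain.py: stage idx+1 of arch_list decides how many
# times the idx-th choice block string is repeated in that stage.
arch_def = [[stem[0]]] + [[choice_block_pool[idx]
                           for _ in range(len(arch_list[idx + 1]))]
                          for idx in range(len(choice_block_pool))] + [[stem[1]]]

for stage_idx, stage in enumerate(arch_def):
    print(stage_idx, stage)
```

Running it prints seven stages whose widths match the per-stage repeat counts in `arch_list`.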
From 0cf817b5979ecb8955ada0bdc7c14dee51443b5c Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Tue, 24 Nov 2020 20:12:05 +0800 Subject: [PATCH 57/62] Move code dir --- examples/nas/cream/train.py | 4 ++-- .../nni => nni/algorithms}/nas/pytorch/cream/__init__.py | 0 .../pynni/nni => nni/algorithms}/nas/pytorch/cream/trainer.py | 0 .../pynni/nni => nni/algorithms}/nas/pytorch/cream/utils.py | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename {src/sdk/pynni/nni => nni/algorithms}/nas/pytorch/cream/__init__.py (100%) rename {src/sdk/pynni/nni => nni/algorithms}/nas/pytorch/cream/trainer.py (100%) rename {src/sdk/pynni/nni => nni/algorithms}/nas/pytorch/cream/utils.py (100%) diff --git a/examples/nas/cream/train.py b/examples/nas/cream/train.py index 3a515e136d..50d340c1ef 100644 --- a/examples/nas/cream/train.py +++ b/examples/nas/cream/train.py @@ -33,8 +33,8 @@ from nni.nas.pytorch.callbacks import LRSchedulerCallback from nni.nas.pytorch.callbacks import ModelCheckpoint -from nni.nas.pytorch.cream import CreamSupernetTrainer -from nni.nas.pytorch.random import RandomMutator +from nni.algorithms.nas.pytorch.cream import CreamSupernetTrainer +from nni.algorithms.nas.pytorch.random import RandomMutator def main(): args, cfg = parse_config_args('nni.cream.supernet') diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/__init__.py b/nni/algorithms/nas/pytorch/cream/__init__.py similarity index 100% rename from src/sdk/pynni/nni/nas/pytorch/cream/__init__.py rename to nni/algorithms/nas/pytorch/cream/__init__.py diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/trainer.py b/nni/algorithms/nas/pytorch/cream/trainer.py similarity index 100% rename from src/sdk/pynni/nni/nas/pytorch/cream/trainer.py rename to nni/algorithms/nas/pytorch/cream/trainer.py diff --git a/src/sdk/pynni/nni/nas/pytorch/cream/utils.py b/nni/algorithms/nas/pytorch/cream/utils.py similarity index 100% rename from src/sdk/pynni/nni/nas/pytorch/cream/utils.py rename to nni/algorithms/nas/pytorch/cream/utils.py From fdeb0b9f1c273c27de8d87e44b96f73ef7e4029d Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Wed, 25 Nov 2020 11:44:07 +0800 Subject: [PATCH 58/62] Fix trainer and retrain optimizer --- examples/nas/cream/configs/retrain.yaml | 4 ++-- nni/algorithms/nas/pytorch/cream/trainer.py | 8 ++++---- nni/algorithms/nas/pytorch/cream/utils.py | 5 ++++- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/examples/nas/cream/configs/retrain.yaml b/examples/nas/cream/configs/retrain.yaml index f042bb1af2..2339dea982 100644 --- a/examples/nas/cream/configs/retrain.yaml +++ b/examples/nas/cream/configs/retrain.yaml @@ -19,7 +19,7 @@ DATASET: NUM_CLASSES: 1000 IMAGE_SIZE: 224 # image patch size INTERPOLATION: 'random' # Image resize interpolation type - BATCH_SIZE: 128 # batch size + BATCH_SIZE: 32 # batch size NO_PREFECHTER: False NET: @@ -32,7 +32,7 @@ NET: FORCE_CPU: False # force model ema to be tracked on CPU DECAY: 0.9998 -OPT: 'rmsproptf' +OPT: 'sgd' OPT_EPS: 1e-2 MOMENTUM: 0.9 DECAY_RATE: 0.1 diff --git a/nni/algorithms/nas/pytorch/cream/trainer.py b/nni/algorithms/nas/pytorch/cream/trainer.py index 2026de4db3..f65b135e0b 100644 --- a/nni/algorithms/nas/pytorch/cream/trainer.py +++ b/nni/algorithms/nas/pytorch/cream/trainer.py @@ -66,7 +66,6 @@ class CreamSupernetTrainer(Trainer): Callbacks to plug into the trainer. See Callbacks. 
""" - def __init__(self, model, loss, val_loss, optimizer, num_epochs, train_loader, valid_loader, mutator=None, batch_size=64, log_frequency=None, @@ -77,7 +76,7 @@ def __init__(self, model, loss, val_loss, assert torch.cuda.is_available() super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None, optimizer, num_epochs, None, None, - batch_size, None, None, log_frequency, callbacks) + batch_size, None, None, log_frequency, callbacks) self.model = model self.loss = loss self.val_loss = val_loss @@ -253,7 +252,7 @@ def _forward_training(self, x, meta_value): soft_label = torch.nn.functional.softmax(teacher_output, dim=1) kd_loss = meta_value * \ - self._cross_entropy_loss_with_soft_target(output, soft_label) + self._cross_entropy_loss_with_soft_target(output, soft_label) return kd_loss # calculate soft target loss @@ -327,6 +326,7 @@ def _get_cand_flops(self, cand): def train_one_epoch(self, epoch): self.current_epoch = epoch meters = AverageMeterGroup() + self.steps_per_epoch = len(self.train_loader) for step, (input_data, target) in enumerate(self.train_loader): self.mutator.reset() self.current_student_arch = self.mutator._cache @@ -382,7 +382,7 @@ def train_one_epoch(self, epoch): if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch): logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs, - step + 1, len(self.train_loader), meters) + step + 1, len(self.train_loader), meters) if self.main_proc and self.num_epochs == epoch + 1: for idx, i in enumerate(self.best_children_pool): diff --git a/nni/algorithms/nas/pytorch/cream/utils.py b/nni/algorithms/nas/pytorch/cream/utils.py index b72f1d8d75..e0542b2f3e 100644 --- a/nni/algorithms/nas/pytorch/cream/utils.py +++ b/nni/algorithms/nas/pytorch/cream/utils.py @@ -5,6 +5,7 @@ import os import torch.distributed as dist + def accuracy(output, target, topk=(1,)): """ Computes the precision@k for the specified values of k """ maxk = max(topk) @@ -24,11 +25,13 @@ def accuracy(output, target, topk=(1,)): res.append(correct_k.mul_(1.0 / batch_size)) return res + def reduce_metrics(metrics): return {k: reduce_tensor(v).item() for k, v in metrics.items()} + def reduce_tensor(tensor): rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) rt /= float(os.environ["WORLD_SIZE"]) - return rt \ No newline at end of file + return rt From d11e4cfb1083914dd0c977149f19d8e3859e4b28 Mon Sep 17 00:00:00 2001 From: Houwen Peng <49014385+penghouwen@users.noreply.github.com> Date: Wed, 25 Nov 2020 12:18:42 +0800 Subject: [PATCH 59/62] Update Cream.md --- docs/en_US/NAS/Cream.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 9e34819b91..ad3d6775b9 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -53,6 +53,10 @@ First, build environments for searching. ``` pip install -r ./requirements + +git clone https://github.com/NVIDIA/apex.git +cd apex +python setup.py install --cpp_ext --cuda_ext ``` To search for an architecture, you need to configure the parameters `FLOPS_MINIMUM` and `FLOPS_MAXIMUM` to specify the desired model flops, such as [0,600]MB flops. 
You can specify the flops interval by changing these two parameters in `./configs/train.yaml` From 06af2cb128e32ac75bfade135115621bbce6edbf Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Thu, 26 Nov 2020 14:10:55 +0800 Subject: [PATCH 60/62] Fix syntax warning --- examples/nas/cream/lib/utils/flops_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nas/cream/lib/utils/flops_table.py b/examples/nas/cream/lib/utils/flops_table.py index 3e03028d1c..254241a075 100644 --- a/examples/nas/cream/lib/utils/flops_table.py +++ b/examples/nas/cream/lib/utils/flops_table.py @@ -72,7 +72,7 @@ def get_params(self, arch): def get_flops(self, arch): flops = 0 for block_id, block in enumerate(arch): - if block is 'LayerChoice1' or block_id is 'LayerChoice23': + if block == 'LayerChoice1' or block_id == 'LayerChoice23': continue for idx, choice in enumerate(arch[block]): flops += self.flops_dict[block_id][idx] * (1 if choice else 0) From 6cb3b977b53639400bc51dcd6e3c35a9969cb56b Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Thu, 26 Nov 2020 20:10:34 +0800 Subject: [PATCH 61/62] Fix syntax warning (again) --- nni/algorithms/nas/pytorch/cream/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/nas/pytorch/cream/trainer.py b/nni/algorithms/nas/pytorch/cream/trainer.py index f65b135e0b..0c5136d1b4 100644 --- a/nni/algorithms/nas/pytorch/cream/trainer.py +++ b/nni/algorithms/nas/pytorch/cream/trainer.py @@ -317,7 +317,7 @@ def _run_update(self, input, target, batch_idx): def _get_cand_flops(self, cand): flops = 0 for block_id, block in enumerate(cand): - if block is 'LayerChoice1' or block_id is 'LayerChoice23': + if block == 'LayerChoice1' or block_id == 'LayerChoice23': continue for idx, choice in enumerate(cand[block]): flops += self.flops_dict[block_id][idx] * (1 if choice else 0) From 999609819f60dcfce69945b5caa59db25d7e8246 Mon Sep 17 00:00:00 2001 From: Yuge Zhang Date: Thu, 26 Nov 2020 21:38:30 +0800 Subject: [PATCH 62/62] Fix docs build warnings --- docs/en_US/NAS/CDARTS.md | 8 ++++---- docs/en_US/NAS/Cream.md | 9 ++++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/docs/en_US/NAS/CDARTS.md b/docs/en_US/NAS/CDARTS.md index 2836f8b681..07f8faf22d 100644 --- a/docs/en_US/NAS/CDARTS.md +++ b/docs/en_US/NAS/CDARTS.md @@ -44,16 +44,16 @@ bash run_retrain_cifar.sh ### PyTorch ```eval_rst -.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer +.. autoclass:: nni.algorithms.nas.pytorch.cdarts.CdartsTrainer :members: -.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator +.. autoclass:: nni.algorithms.nas.pytorch.cdarts.RegularizedDartsMutator :members: -.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator +.. autoclass:: nni.algorithms.nas.pytorch.cdarts.DartsDiscreteMutator :members: -.. autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel +.. autoclass:: nni.algorithms.nas.pytorch.cdarts.RegularizedMutatorParallel :members: ``` diff --git a/docs/en_US/NAS/Cream.md b/docs/en_US/NAS/Cream.md index 9e34819b91..d431e576d9 100644 --- a/docs/en_US/NAS/Cream.md +++ b/docs/en_US/NAS/Cream.md @@ -75,21 +75,24 @@ The searched architectures need to be retrained and obtain the final model. The ### II. Retrain To train searched architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./configs/retrain.yaml`. You can select one from [14,43,112,287,481,604], which stands for different Flops(MB). 
-```buildoutcfg + +``` MODEL_SELECTION: 43 # Retrain 43m model MODEL_SELECTION: 481 # Retrain 481m model ...... ``` To train random architectures, you need specify `MODEL_SELECTION` to `-1` and configure the parameter `INPUT_ARCH`: -```buildoutcfg + +``` MODEL_SELECTION: -1 # Train random architectures INPUT_ARCH: [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] # Random Architectures ...... ``` After adding `MODEL_SELECTION` in `./configs/retrain.yaml`, you need to use the following command to train the model. -```buildoutcfg + +``` python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./configs/retrain.yaml ```
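
For convenience, a minimal, illustrative sketch of how the retrain settings touched by this series might sit together in `./configs/retrain.yaml`. The key names (`MODEL_SELECTION`, `BATCH_SIZE`, `OPT`, `MOMENTUM`, `INPUT_ARCH`) come from the diffs and docs above; the exact nesting of `MODEL_SELECTION` and any values not shown in the diffs are assumptions, not the project's canonical config.

```
# Illustrative excerpt only -- not the full retrain.yaml shipped with the example.
DATASET:
  BATCH_SIZE: 32            # reduced from 128 in PATCH 58

OPT: 'sgd'                  # switched from 'rmsproptf' in PATCH 58
MOMENTUM: 0.9

# Per docs/en_US/NAS/Cream.md: pick one of [14, 43, 112, 287, 481, 604] (FLOPs in MB),
# or set -1 together with INPUT_ARCH to retrain a random architecture.
MODEL_SELECTION: 43
```

A config along these lines would then be passed to the distributed launch command shown in the docs above (`python -m torch.distributed.launch --nproc_per_node=8 ./retrain.py --cfg ./configs/retrain.yaml`).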